diff --git a/.gitignore b/.gitignore
index 40a538b..7405b78 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,7 @@
#
build/
+*.pyc
+obj_dir
+examples/*/axi_module*
+python/verilog/testcases/*/axi_module*
+apps/*.py
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c0c4bbe..c783ea5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,21 +1,10 @@
-# Copyright 2011,2012,2014,2016 Free Software Foundation, Inc.
+# Copyright 2011-2020 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Project setup
@@ -39,63 +28,35 @@ set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "")
# Make sure our local CMake Modules path comes first
list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules)
+# Find gnuradio to get access to the cmake modules
+find_package(Gnuradio "3.10" REQUIRED)
# Set the version information here
set(VERSION_MAJOR 1)
-set(VERSION_API 0)
-set(VERSION_ABI 0)
-set(VERSION_PATCH git)
+set(VERSION_API 0)
+set(VERSION_ABI 0)
+set(VERSION_PATCH 0)
-# Set cmake policies.
-# This will suppress developer warnings during the cmake process that can occur
-# if a newer cmake version than the minimum is used.
+cmake_policy(SET CMP0011 NEW)
-if(POLICY CMP0026)
- cmake_policy(SET CMP0026 OLD)
-endif()
-if(POLICY CMP0043)
- cmake_policy(SET CMP0043 OLD)
-endif()
-if(POLICY CMP0045)
- cmake_policy(SET CMP0045 OLD)
-endif()
-if(POLICY CMP0046)
- cmake_policy(SET CMP0046 OLD)
-endif()
+# Enable generation of compile_commands.json for code completion engines
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
########################################################################
-# Compiler specific setup
+# Minimum Version Requirements
########################################################################
-if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT WIN32)
- #http://gcc.gnu.org/wiki/Visibility
- add_definitions(-fvisibility=hidden)
-endif()
+
+include(GrMinReq)
########################################################################
-# Find boost
+# Compiler settings
########################################################################
-if(UNIX AND EXISTS "/usr/lib64")
- list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix
-endif(UNIX AND EXISTS "/usr/lib64")
-set(Boost_ADDITIONAL_VERSIONS
- "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39"
- "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44"
- "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49"
- "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54"
- "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59"
- "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64"
- "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69"
-)
-find_package(Boost "1.35" COMPONENTS filesystem system)
-if(NOT Boost_FOUND)
- message(FATAL_ERROR "Boost required to compile verilog")
-endif()
+include(GrCompilerSettings)
########################################################################
# Install directories
########################################################################
-find_package(Gnuradio "3.8" REQUIRED)
include(GrVersion)
include(GrPlatform) #define LIB_SUFFIX
@@ -104,8 +65,8 @@ if(NOT CMAKE_MODULES_DIR)
set(CMAKE_MODULES_DIR lib${LIB_SUFFIX}/cmake)
endif(NOT CMAKE_MODULES_DIR)
-set(GR_INCLUDE_DIR include/verilog)
-set(GR_CMAKE_DIR ${CMAKE_MODULES_DIR}/${CMAKE_PROJECT_NAME})
+set(GR_INCLUDE_DIR include/gnuradio/verilog)
+set(GR_CMAKE_DIR ${CMAKE_MODULES_DIR}/gnuradio-verilog)
set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME})
set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME})
set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d)
@@ -134,12 +95,8 @@ endif(APPLE)
########################################################################
# Find gnuradio build dependencies
########################################################################
-find_package(CppUnit)
find_package(Doxygen)
-if(NOT CPPUNIT_FOUND)
- message(FATAL_ERROR "CppUnit required to compile verilog")
-endif()
########################################################################
# Setup doxygen option
@@ -165,19 +122,32 @@ add_custom_target(uninstall
########################################################################
# Add subdirectories
########################################################################
-add_subdirectory(include/verilog)
+add_subdirectory(include/gnuradio/verilog)
add_subdirectory(lib)
add_subdirectory(apps)
add_subdirectory(docs)
-add_subdirectory(swig)
-add_subdirectory(python)
-add_subdirectory(grc)
+# NOTE: manually update below to use GRC to generate C++ flowgraphs w/o python
+if(ENABLE_PYTHON)
+ message(STATUS "PYTHON and GRC components are enabled")
+ add_subdirectory(python/verilog)
+ add_subdirectory(grc)
add_subdirectory(templates)
+else(ENABLE_PYTHON)
+ message(STATUS "PYTHON and GRC components are disabled")
+endif(ENABLE_PYTHON)
########################################################################
# Install cmake search helper for this library
########################################################################
-install(FILES cmake/Modules/verilogConfig.cmake
- DESTINATION ${CMAKE_MODULES_DIR}/${CMAKE_PROJECT_NAME}
+install(FILES cmake/Modules/gnuradio-verilogConfig.cmake
+ DESTINATION ${GR_CMAKE_DIR}
)
+
+include(CMakePackageConfigHelpers)
+configure_package_config_file(
+ ${PROJECT_SOURCE_DIR}/cmake/Modules/targetConfig.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/cmake/Modules/${target}Config.cmake
+ INSTALL_DESTINATION ${GR_CMAKE_DIR}
+)
+
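For context on the `ENABLE_PYTHON` conditional added above: it follows the usual GNU Radio 3.10 out-of-tree pattern, so a typical build of this module might look like the sketch below. This is only a sketch under standard assumptions: apart from `ENABLE_PYTHON`, which the new conditional checks directly, the flags are plain CMake, the install prefix is whatever your GNU Radio installation uses, and `ENABLE_PYTHON` is assumed not to be forced elsewhere in the file.

```bash
# Configure the out-of-tree module (GNU Radio 3.10 and the other
# dependencies listed in the README must already be installed).
cmake -S . -B build -DCMAKE_BUILD_TYPE=Release
# Or build only the C++ pieces, skipping the Python bindings and GRC blocks:
#   cmake -S . -B build -DENABLE_PYTHON=OFF

cmake --build build -j"$(nproc)"
sudo cmake --install build
sudo ldconfig   # typical on Linux so the freshly installed gnuradio-verilog library is found
```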
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index f288702..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- Copyright (C)
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/MANIFEST.md b/MANIFEST.md
index 1b947d9..973d619 100644
--- a/MANIFEST.md
+++ b/MANIFEST.md
@@ -7,6 +7,7 @@ author:
copyright_owner:
- Copyright Owner 1
license:
+gr_supported_version: # Put a comma separated list of supported GR versions here
#repo: # Put the URL of the repository here, or leave blank for default
#website: # If you have a separate project website, put it here
#icon: # Put a URL to a square image here that will be used as an icon on CGRAN
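The new `gr_supported_version` key takes a comma-separated list of GNU Radio release series. A filled-in entry consistent with the `find_package(Gnuradio "3.10" REQUIRED)` change above might look like the following; the `vX.Y` tag style is only illustrative of what CGRAN manifests commonly use.

```
gr_supported_version: v3.10
```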
diff --git a/README.md b/README.md
index a01fd52..810d0fc 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ First you need to install the dependencies (see below).
Then, you need to download this repository
```bash
-$ git clone https://github.com/B0WEN-HU/gr-verilog.git
+$ git clone https://github.com/gnuradio/gr-verilog.git
```
After this, gr-verilog should be installed as any other GNU Radio out-of-tree module.
@@ -88,4 +88,4 @@ The `Complex` type of `Verilog AXI`, `verilog_axi_cc`, is not the block that is
## Future Work
Add more examples.
-Bring verilog_general_xx into the module.
\ No newline at end of file
+Bring verilog_general_xx into the module.
diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt
index c837d77..c241798 100644
--- a/apps/CMakeLists.txt
+++ b/apps/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
include(GrPython)
diff --git a/apps/verilog_axi_bb_demo.grc b/apps/verilog_axi_bb_demo.grc
new file mode 100644
index 0000000..64c0419
--- /dev/null
+++ b/apps/verilog_axi_bb_demo.grc
@@ -0,0 +1,349 @@
+options:
+ parameters:
+ author: Bowen Hu
+ catch_exceptions: 'True'
+ category: '[GRC Hier Blocks]'
+ cmake_opt: ''
+ comment: ''
+ copyright: ''
+    description: This is a demo of verilog_axi_bb block
+ gen_cmake: 'On'
+ gen_linking: dynamic
+ generate_options: qt_gui
+ hier_block_src_path: '.:'
+    id: verilog_axi_bb_demo
+ max_nouts: '0'
+ output_language: python
+ placement: (0,0)
+ qt_qss_theme: ''
+ realtime_scheduling: ''
+ run: 'True'
+ run_command: '{python} -u {filename}'
+ run_options: prompt
+ sizing_mode: fixed
+ thread_safe_setters: ''
+    title: verilog_axi_bb demo
+ window_size: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 8]
+ rotation: 0
+ state: enabled
+
+blocks:
+- name: samp_rate
+ id: variable
+ parameters:
+ comment: ''
+ value: '32000'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 132]
+ rotation: 0
+ state: enabled
+- name: analog_sig_source_x_0
+ id: analog_sig_source_x
+ parameters:
+ affinity: ''
+ alias: ''
+ amp: '50'
+ comment: ''
+ freq: '1000'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ offset: '0'
+ phase: '0'
+ samp_rate: samp_rate
+ type: byte
+ waveform: analog.GR_COS_WAVE
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [152, 256.0]
+ rotation: 0
+ state: enabled
+- name: blocks_char_to_float_0
+ id: blocks_char_to_float
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ scale: '1'
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [848, 292.0]
+ rotation: 0
+ state: true
+- name: blocks_char_to_float_0_0
+ id: blocks_char_to_float
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ scale: '1'
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [848, 420.0]
+ rotation: 0
+ state: true
+- name: blocks_throttle_0
+ id: blocks_throttle
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ ignoretag: 'True'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ samples_per_second: samp_rate
+ type: byte
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [392, 292]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_0
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: Before Verilog AXI
+ label10: ''
+ label2: After Verilog AXI
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+ name: '"Input"'
+ nconnections: '1'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '4'
+ ymin: '-4'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [1024, 268.0]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_0_0
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: Before Verilog AXI
+ label10: ''
+ label2: After Verilog AXI
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+ name: '"Output"'
+ nconnections: '1'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '4'
+ ymin: '-4'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [1024, 396.0]
+ rotation: 0
+ state: enabled
+- name: verilog_axi_xx_0
+ id: verilog_axi_xx
+ parameters:
+ IO_ratio: '1.0'
+ affinity: ''
+ alias: ''
+ comment: ''
+ file: examples/double/double_axi.v
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ module_flag: '0'
+ overwrite: 'True'
+ skip_output_items: '0'
+ type: byte
+ verilator_options: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [608, 384.0]
+ rotation: 0
+ state: enabled
+
+connections:
+- [analog_sig_source_x_0, '0', blocks_throttle_0, '0']
+- [blocks_char_to_float_0, '0', qtgui_time_sink_x_0, '0']
+- [blocks_char_to_float_0_0, '0', qtgui_time_sink_x_0_0, '0']
+- [blocks_throttle_0, '0', blocks_char_to_float_0, '0']
+- [blocks_throttle_0, '0', verilog_axi_xx_0, '0']
+- [verilog_axi_xx_0, '0', blocks_char_to_float_0_0, '0']
+
+metadata:
+ file_format: 1
diff --git a/apps/verilog_axi_ff_demo.grc b/apps/verilog_axi_ff_demo.grc
index 4c7a6b3..45f367e 100644
--- a/apps/verilog_axi_ff_demo.grc
+++ b/apps/verilog_axi_ff_demo.grc
@@ -1,687 +1,313 @@
-
-
-
- Thu Aug 22 16:50:22 2019
-
- options
-
- author
- Bowen Hu
-
-
- window_size
-
-
-
- category
- [GRC Hier Blocks]
-
-
- comment
-
-
-
- description
- This is a demo of verilog_axi_ff block
-
-
- _enabled
- True
-
-
- _coordinate
- (8, 8)
-
-
- _rotation
- 0
-
-
- generate_options
- qt_gui
-
-
- hier_block_src_path
- .:
-
-
- id
- verilog_axi_ff_demo
-
-
- max_nouts
- 0
-
-
- qt_qss_theme
-
-
-
- realtime_scheduling
-
-
-
- run_command
- {python} -u {filename}
-
-
- run_options
- prompt
-
-
- run
- True
-
-
- thread_safe_setters
-
-
-
- title
- verilog_axi_ii demo
-
-
-
- variable
-
- comment
-
-
-
- _enabled
- True
-
-
- _coordinate
- (8, 132)
-
-
- _rotation
- 0
-
-
- id
- samp_rate
-
-
- value
- 32000
-
-
-
- analog_sig_source_x
-
- amp
- 1
-
-
- alias
-
-
-
- comment
-
-
-
- affinity
-
-
-
- _enabled
- True
-
-
- freq
- 1000
-
-
- _coordinate
- (208, 260)
-
-
- _rotation
- 0
-
-
- id
- analog_sig_source_x_0
-
-
- maxoutbuf
- 0
-
-
- minoutbuf
- 0
-
-
- offset
- 0
-
-
- type
- float
-
-
- samp_rate
- samp_rate
-
-
- waveform
- analog.GR_COS_WAVE
-
-
-
- blocks_throttle
-
- alias
-
-
-
- comment
-
-
-
- affinity
-
-
-
- _enabled
- True
-
-
- _coordinate
- (392, 292)
-
-
- _rotation
- 0
-
-
- id
- blocks_throttle_0
-
-
- ignoretag
- True
-
-
- maxoutbuf
- 0
-
-
- minoutbuf
- 0
-
-
- samples_per_second
- samp_rate
-
-
- type
- float
-
-
- vlen
- 1
-
-
-
- qtgui_time_sink_x
-
- autoscale
- False
-
-
- axislabels
- True
-
-
- alias
-
-
-
- comment
-
-
-
- ctrlpanel
- False
-
-
- affinity
-
-
-
- entags
- True
-
-
- _enabled
- True
-
-
- _coordinate
- (920, 292)
-
-
- gui_hint
-
-
-
- _rotation
- 0
-
-
- grid
- False
-
-
- id
- qtgui_time_sink_x_0
-
-
- legend
- True
-
-
- alpha1
- 1.0
-
-
- color1
- "blue"
-
-
- label1
- Before Verilog AXI
-
-
- marker1
- -1
-
-
- style1
- 1
-
-
- width1
- 1
-
-
- alpha10
- 1.0
-
-
- color10
- "blue"
-
-
- label10
-
-
-
- marker10
- -1
-
-
- style10
- 1
-
-
- width10
- 1
-
-
- alpha2
- 1.0
-
-
- color2
- "red"
-
-
- label2
- After Verilog AXI
-
-
- marker2
- -1
-
-
- style2
- 1
-
-
- width2
- 1
-
-
- alpha3
- 1.0
-
-
- color3
- "green"
-
-
- label3
-
-
-
- marker3
- -1
-
-
- style3
- 1
-
-
- width3
- 1
-
-
- alpha4
- 1.0
-
-
- color4
- "black"
-
-
- label4
-
-
-
- marker4
- -1
-
-
- style4
- 1
-
-
- width4
- 1
-
-
- alpha5
- 1.0
-
-
- color5
- "cyan"
-
-
- label5
-
-
-
- marker5
- -1
-
-
- style5
- 1
-
-
- width5
- 1
-
-
- alpha6
- 1.0
-
-
- color6
- "magenta"
-
-
- label6
-
-
-
- marker6
- -1
-
-
- style6
- 1
-
-
- width6
- 1
-
-
- alpha7
- 1.0
-
-
- color7
- "yellow"
-
-
- label7
-
-
-
- marker7
- -1
-
-
- style7
- 1
-
-
- width7
- 1
-
-
- alpha8
- 1.0
-
-
- color8
- "dark red"
-
-
- label8
-
-
-
- marker8
- -1
-
-
- style8
- 1
-
-
- width8
- 1
-
-
- alpha9
- 1.0
-
-
- color9
- "dark green"
-
-
- label9
-
-
-
- marker9
- -1
-
-
- style9
- 1
-
-
- width9
- 1
-
-
- name
- ""
-
-
- nconnections
- 2
-
-
- size
- 1024
-
-
- srate
- samp_rate
-
-
- stemplot
- False
-
-
- tr_chan
- 0
-
-
- tr_delay
- 0
-
-
- tr_level
- 0.0
-
-
- tr_mode
- qtgui.TRIG_MODE_FREE
-
-
- tr_slope
- qtgui.TRIG_SLOPE_POS
-
-
- tr_tag
- ""
-
-
- type
- float
-
-
- update_time
- 0.10
-
-
- ylabel
- Amplitude
-
-
- yunit
- ""
-
-
- ymax
- 4
-
-
- ymin
- -4
-
-
-
- verilog_axi_xx
-
- alias
-
-
-
- comment
-
-
-
- affinity
-
-
-
- _enabled
- True
-
-
- _coordinate
- (584, 444)
-
-
- _rotation
- 0
-
-
- id
- verilog_axi_xx_0
-
-
- IO_ratio
- 1.0
-
-
- type
- float
-
-
- maxoutbuf
- 0
-
-
- minoutbuf
- 0
-
-
- module_flag
- 0
-
-
- overwrite
- True
-
-
- skip_output_items
- 0
-
-
- verilator_options
-
-
-
- file
-
-
-
-
- analog_sig_source_x_0
- blocks_throttle_0
- 0
- 0
-
-
- blocks_throttle_0
- qtgui_time_sink_x_0
- 0
- 0
-
-
- blocks_throttle_0
- verilog_axi_xx_0
- 0
- 0
-
-
- verilog_axi_xx_0
- qtgui_time_sink_x_0
- 0
- 1
-
-
+options:
+ parameters:
+ author: Bowen Hu
+ catch_exceptions: 'True'
+ category: '[GRC Hier Blocks]'
+ cmake_opt: ''
+ comment: ''
+ copyright: ''
+ description: This is a demo of verilog_axi_ff block
+ gen_cmake: 'On'
+ gen_linking: dynamic
+ generate_options: qt_gui
+ hier_block_src_path: '.:'
+ id: verilog_axi_ff_demo
+ max_nouts: '0'
+ output_language: python
+ placement: (0,0)
+ qt_qss_theme: ''
+ realtime_scheduling: ''
+ run: 'True'
+ run_command: '{python} -u {filename}'
+ run_options: prompt
+ sizing_mode: fixed
+ thread_safe_setters: ''
+    title: verilog_axi_ff demo
+ window_size: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 8]
+ rotation: 0
+ state: enabled
+
+blocks:
+- name: samp_rate
+ id: variable
+ parameters:
+ comment: ''
+ value: '32000'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 132]
+ rotation: 0
+ state: enabled
+- name: analog_sig_source_x_0
+ id: analog_sig_source_x
+ parameters:
+ affinity: ''
+ alias: ''
+ amp: '1'
+ comment: ''
+ freq: '1000'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ offset: '0'
+ phase: '0'
+ samp_rate: samp_rate
+ type: float
+ waveform: analog.GR_COS_WAVE
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [216, 264.0]
+ rotation: 0
+ state: enabled
+- name: blocks_throttle_0
+ id: blocks_throttle
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ ignoretag: 'True'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ samples_per_second: samp_rate
+ type: float
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [432, 300.0]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_0
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: Before Verilog AXI
+ label10: ''
+ label2: After Verilog AXI
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+ name: '"Input"'
+ nconnections: '1'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '4'
+ ymin: '-4'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [872, 276.0]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_0_0
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: Before Verilog AXI
+ label10: ''
+ label2: After Verilog AXI
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+    name: '"Output"'
+ nconnections: '1'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '4'
+ ymin: '-4'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [872, 372.0]
+ rotation: 0
+ state: enabled
+- name: verilog_axi_xx_0
+ id: verilog_axi_xx
+ parameters:
+ IO_ratio: '1.0'
+ affinity: ''
+ alias: ''
+ comment: ''
+ file: examples/double/double_axi.v
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ module_flag: '0'
+ overwrite: 'True'
+ skip_output_items: '0'
+ type: float
+ verilator_options: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [664, 360.0]
+ rotation: 0
+ state: enabled
+
+connections:
+- [analog_sig_source_x_0, '0', blocks_throttle_0, '0']
+- [blocks_throttle_0, '0', qtgui_time_sink_x_0, '0']
+- [blocks_throttle_0, '0', verilog_axi_xx_0, '0']
+- [verilog_axi_xx_0, '0', qtgui_time_sink_x_0_0, '0']
+
+metadata:
+ file_format: 1
diff --git a/apps/verilog_axi_ii_demo.grc b/apps/verilog_axi_ii_demo.grc
index 715bc78..09a6d21 100644
--- a/apps/verilog_axi_ii_demo.grc
+++ b/apps/verilog_axi_ii_demo.grc
@@ -1,1156 +1,347 @@
- [legacy XML flowgraph removed (tags lost in extraction): generated Thu Aug 22 14:47:29 2019; options block (id verilog_axi_ii_demo, title "verilog_axi_ii demo"), samp_rate = 32000, analog_fastnoise_source_x_0 (Gaussian, int, amp 32), blocks_throttle_0 (int), blocks_int_to_float_0 and blocks_int_to_float_1, two qtgui_time_sink_x sinks ("Before Verilog AXI" / "After Verilog AXI", ymin -256 / ymax 256), verilog_axi_xx_0 (IO_ratio 1.0, type int, module_flag 0, overwrite True, file unset), and the old connection list]
+options:
+ parameters:
+ author: Bowen Hu
+ catch_exceptions: 'True'
+ category: '[GRC Hier Blocks]'
+ cmake_opt: ''
+ comment: ''
+ copyright: ''
+ description: This is a demo of verilog_axi_ii block
+ gen_cmake: 'On'
+ gen_linking: dynamic
+ generate_options: qt_gui
+ hier_block_src_path: '.:'
+ id: verilog_axi_ii_demo
+ max_nouts: '0'
+ output_language: python
+ placement: (0,0)
+ qt_qss_theme: ''
+ realtime_scheduling: ''
+ run: 'True'
+ run_command: '{python} -u {filename}'
+ run_options: prompt
+ sizing_mode: fixed
+ thread_safe_setters: ''
+ title: verilog_axi_ii demo
+ window_size: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 8]
+ rotation: 0
+ state: enabled
+
+blocks:
+- name: samp_rate
+ id: variable
+ parameters:
+ comment: ''
+ value: '32000'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 132]
+ rotation: 0
+ state: enabled
+- name: analog_fastnoise_source_x_0
+ id: analog_fastnoise_source_x
+ parameters:
+ affinity: ''
+ alias: ''
+ amp: '32'
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ noise_type: analog.GR_GAUSSIAN
+ samples: '8192'
+ seed: '0'
+ type: int
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [112, 268]
+ rotation: 0
+ state: enabled
+- name: blocks_int_to_float_0
+ id: blocks_int_to_float
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ scale: '1'
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [760, 292]
+ rotation: 0
+ state: enabled
+- name: blocks_int_to_float_1
+ id: blocks_int_to_float
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ scale: '1'
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [760, 428.0]
+ rotation: 0
+ state: enabled
+- name: blocks_throttle_0
+ id: blocks_throttle
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ ignoretag: 'True'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ samples_per_second: samp_rate
+ type: int
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [328, 292]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_0
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: ''
+ label10: ''
+ label2: ''
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+ name: '"After Verilog AXI"'
+ nconnections: '1'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '256'
+ ymin: '-256'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [936, 404.0]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_1
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: ''
+ label10: ''
+ label2: ''
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+ name: '"Before Verilog AXI"'
+ nconnections: '1'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '256'
+ ymin: '-256'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [936, 268]
+ rotation: 0
+ state: enabled
+- name: verilog_axi_xx_0
+ id: verilog_axi_xx
+ parameters:
+ IO_ratio: '1.0'
+ affinity: ''
+ alias: ''
+ comment: ''
+ file: examples/double/double_axi.v
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ module_flag: '0'
+ overwrite: 'True'
+ skip_output_items: '0'
+ type: int
+ verilator_options: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [528, 392.0]
+ rotation: 0
+ state: enabled
+
+connections:
+- [analog_fastnoise_source_x_0, '0', blocks_throttle_0, '0']
+- [blocks_int_to_float_0, '0', qtgui_time_sink_x_1, '0']
+- [blocks_int_to_float_1, '0', qtgui_time_sink_x_0, '0']
+- [blocks_throttle_0, '0', blocks_int_to_float_0, '0']
+- [blocks_throttle_0, '0', verilog_axi_xx_0, '0']
+- [verilog_axi_xx_0, '0', blocks_int_to_float_1, '0']
+
+metadata:
+ file_format: 1
diff --git a/apps/verilog_axi_ss_demo.grc b/apps/verilog_axi_ss_demo.grc
new file mode 100644
index 0000000..38bf7b8
--- /dev/null
+++ b/apps/verilog_axi_ss_demo.grc
@@ -0,0 +1,252 @@
+options:
+ parameters:
+ author: Bowen Hu
+ catch_exceptions: 'True'
+ category: '[GRC Hier Blocks]'
+ cmake_opt: ''
+ comment: ''
+ copyright: ''
+    description: This is a demo of verilog_axi_ss block
+ gen_cmake: 'On'
+ gen_linking: dynamic
+ generate_options: qt_gui
+ hier_block_src_path: '.:'
+    id: verilog_axi_ss_demo
+ max_nouts: '0'
+ output_language: python
+ placement: (0,0)
+ qt_qss_theme: ''
+ realtime_scheduling: ''
+ run: 'True'
+ run_command: '{python} -u {filename}'
+ run_options: prompt
+ sizing_mode: fixed
+ thread_safe_setters: ''
+    title: verilog_axi_ss demo
+ window_size: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 8]
+ rotation: 0
+ state: enabled
+
+blocks:
+- name: samp_rate
+ id: variable
+ parameters:
+ comment: ''
+ value: '32000'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [8, 132]
+ rotation: 0
+ state: enabled
+- name: analog_sig_source_x_0
+ id: analog_sig_source_x
+ parameters:
+ affinity: ''
+ alias: ''
+ amp: '100'
+ comment: ''
+ freq: '1000'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ offset: '0'
+ phase: '0'
+ samp_rate: samp_rate
+ type: short
+ waveform: analog.GR_COS_WAVE
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [152, 256.0]
+ rotation: 0
+ state: enabled
+- name: blocks_short_to_float_0
+ id: blocks_short_to_float
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ scale: '1'
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [848, 292.0]
+ rotation: 0
+ state: true
+- name: blocks_short_to_float_0_0
+ id: blocks_short_to_float
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ scale: '1'
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [848, 340.0]
+ rotation: 0
+ state: true
+- name: blocks_throttle_0
+ id: blocks_throttle
+ parameters:
+ affinity: ''
+ alias: ''
+ comment: ''
+ ignoretag: 'True'
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ samples_per_second: samp_rate
+ type: short
+ vlen: '1'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [392, 292]
+ rotation: 0
+ state: enabled
+- name: qtgui_time_sink_x_0
+ id: qtgui_time_sink_x
+ parameters:
+ affinity: ''
+ alias: ''
+ alpha1: '1.0'
+ alpha10: '1.0'
+ alpha2: '1.0'
+ alpha3: '1.0'
+ alpha4: '1.0'
+ alpha5: '1.0'
+ alpha6: '1.0'
+ alpha7: '1.0'
+ alpha8: '1.0'
+ alpha9: '1.0'
+ autoscale: 'True'
+ axislabels: 'True'
+ color1: blue
+ color10: dark blue
+ color2: red
+ color3: green
+ color4: black
+ color5: cyan
+ color6: magenta
+ color7: yellow
+ color8: dark red
+ color9: dark green
+ comment: ''
+ ctrlpanel: 'False'
+ entags: 'True'
+ grid: 'True'
+ gui_hint: ''
+ label1: Before Verilog AXI
+ label10: ''
+ label2: After Verilog AXI
+ label3: ''
+ label4: ''
+ label5: ''
+ label6: ''
+ label7: ''
+ label8: ''
+ label9: ''
+ legend: 'True'
+ marker1: '-1'
+ marker10: '-1'
+ marker2: '-1'
+ marker3: '-1'
+ marker4: '-1'
+ marker5: '-1'
+ marker6: '-1'
+ marker7: '-1'
+ marker8: '-1'
+ marker9: '-1'
+ name: '""'
+ nconnections: '2'
+ size: '1024'
+ srate: samp_rate
+ stemplot: 'False'
+ style1: '1'
+ style10: '1'
+ style2: '1'
+ style3: '1'
+ style4: '1'
+ style5: '1'
+ style6: '1'
+ style7: '1'
+ style8: '1'
+ style9: '1'
+ tr_chan: '0'
+ tr_delay: '0'
+ tr_level: '0.0'
+ tr_mode: qtgui.TRIG_MODE_FREE
+ tr_slope: qtgui.TRIG_SLOPE_POS
+ tr_tag: '""'
+ type: float
+ update_time: '0.10'
+ width1: '1'
+ width10: '1'
+ width2: '1'
+ width3: '1'
+ width4: '1'
+ width5: '1'
+ width6: '1'
+ width7: '1'
+ width8: '1'
+ width9: '1'
+ ylabel: Amplitude
+ ymax: '4'
+ ymin: '-4'
+ yunit: '""'
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [1040, 300.0]
+ rotation: 0
+ state: enabled
+- name: verilog_axi_xx_0
+ id: verilog_axi_xx
+ parameters:
+ IO_ratio: '1.0'
+ affinity: ''
+ alias: ''
+ comment: ''
+ file: examples/double/double_axi.v
+ maxoutbuf: '0'
+ minoutbuf: '0'
+ module_flag: '0'
+ overwrite: 'True'
+ skip_output_items: '0'
+ type: short
+ verilator_options: ''
+ states:
+ bus_sink: false
+ bus_source: false
+ bus_structure: null
+ coordinate: [584, 444]
+ rotation: 0
+ state: enabled
+
+connections:
+- [analog_sig_source_x_0, '0', blocks_throttle_0, '0']
+- [blocks_short_to_float_0, '0', qtgui_time_sink_x_0, '0']
+- [blocks_short_to_float_0_0, '0', qtgui_time_sink_x_0, '1']
+- [blocks_throttle_0, '0', blocks_short_to_float_0, '0']
+- [blocks_throttle_0, '0', verilog_axi_xx_0, '0']
+- [verilog_axi_xx_0, '0', blocks_short_to_float_0_0, '0']
+
+metadata:
+ file_format: 1
diff --git a/cmake/Modules/FindCppUnit.cmake b/cmake/Modules/FindCppUnit.cmake
deleted file mode 100644
index f93ade3..0000000
--- a/cmake/Modules/FindCppUnit.cmake
+++ /dev/null
@@ -1,39 +0,0 @@
-# http://www.cmake.org/pipermail/cmake/2006-October/011446.html
-# Modified to use pkg config and use standard var names
-
-#
-# Find the CppUnit includes and library
-#
-# This module defines
-# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc.
-# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit.
-# CPPUNIT_FOUND, If false, do not try to use CppUnit.
-
-INCLUDE(FindPkgConfig)
-PKG_CHECK_MODULES(PC_CPPUNIT "cppunit")
-
-FIND_PATH(CPPUNIT_INCLUDE_DIRS
- NAMES cppunit/TestCase.h
- HINTS ${PC_CPPUNIT_INCLUDE_DIR}
- ${CMAKE_INSTALL_PREFIX}/include
- PATHS
- /usr/local/include
- /usr/include
-)
-
-FIND_LIBRARY(CPPUNIT_LIBRARIES
- NAMES cppunit
- HINTS ${PC_CPPUNIT_LIBDIR}
- ${CMAKE_INSTALL_PREFIX}/lib
- ${CMAKE_INSTALL_PREFIX}/lib64
- PATHS
- ${CPPUNIT_INCLUDE_DIRS}/../lib
- /usr/local/lib
- /usr/lib
-)
-
-LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS})
-
-INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
-MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
diff --git a/cmake/Modules/FindGnuradioRuntime.cmake b/cmake/Modules/FindGnuradioRuntime.cmake
deleted file mode 100644
index afed684..0000000
--- a/cmake/Modules/FindGnuradioRuntime.cmake
+++ /dev/null
@@ -1,36 +0,0 @@
-INCLUDE(FindPkgConfig)
-PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime)
-
-if(PC_GNURADIO_RUNTIME_FOUND)
- # look for include files
- FIND_PATH(
- GNURADIO_RUNTIME_INCLUDE_DIRS
- NAMES gnuradio/top_block.h
- HINTS $ENV{GNURADIO_RUNTIME_DIR}/include
- ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS}
- ${CMAKE_INSTALL_PREFIX}/include
- PATHS /usr/local/include
- /usr/include
- )
-
- # look for libs
- FIND_LIBRARY(
- GNURADIO_RUNTIME_LIBRARIES
- NAMES gnuradio-runtime
- HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib
- ${PC_GNURADIO_RUNTIME_LIBDIR}
- ${CMAKE_INSTALL_PREFIX}/lib/
- ${CMAKE_INSTALL_PREFIX}/lib64/
- PATHS /usr/local/lib
- /usr/local/lib64
- /usr/lib
- /usr/lib64
- )
-
- set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND})
-endif(PC_GNURADIO_RUNTIME_FOUND)
-
-INCLUDE(FindPackageHandleStandardArgs)
-# do not check GNURADIO_RUNTIME_INCLUDE_DIRS, is not set when default include path us used.
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES)
-MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS)
diff --git a/cmake/Modules/GrMiscUtils.cmake b/cmake/Modules/GrMiscUtils.cmake
deleted file mode 100644
index 5bad57c..0000000
--- a/cmake/Modules/GrMiscUtils.cmake
+++ /dev/null
@@ -1,528 +0,0 @@
-# Copyright 2010-2011,2014 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE)
-
-########################################################################
-# Set global variable macro.
-# Used for subdirectories to export settings.
-# Example: include and library paths.
-########################################################################
-function(GR_SET_GLOBAL var)
- set(${var} ${ARGN} CACHE INTERNAL "" FORCE)
-endfunction(GR_SET_GLOBAL)
-
-########################################################################
-# Set the pre-processor definition if the condition is true.
-# - def the pre-processor definition to set and condition name
-########################################################################
-function(GR_ADD_COND_DEF def)
- if(${def})
- add_definitions(-D${def})
- endif(${def})
-endfunction(GR_ADD_COND_DEF)
-
-########################################################################
-# Check for a header and conditionally set a compile define.
-# - hdr the relative path to the header file
-# - def the pre-processor definition to set
-########################################################################
-function(GR_CHECK_HDR_N_DEF hdr def)
- include(CheckIncludeFileCXX)
- CHECK_INCLUDE_FILE_CXX(${hdr} ${def})
- GR_ADD_COND_DEF(${def})
-endfunction(GR_CHECK_HDR_N_DEF)
-
-########################################################################
-# Include subdirectory macro.
-# Sets the CMake directory variables,
-# includes the subdirectory CMakeLists.txt,
-# resets the CMake directory variables.
-#
-# This macro includes subdirectories rather than adding them
-# so that the subdirectory can affect variables in the level above.
-# This provides a work-around for the lack of convenience libraries.
-# This way a subdirectory can append to the list of library sources.
-########################################################################
-macro(GR_INCLUDE_SUBDIRECTORY subdir)
- #insert the current directories on the front of the list
- list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR})
- list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR})
-
- #set the current directories to the names of the subdirs
- set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir})
- set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir})
-
- #include the subdirectory CMakeLists to run it
- file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
- include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt)
-
- #reset the value of the current directories
- list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR)
- list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR)
-
- #pop the subdir names of the front of the list
- list(REMOVE_AT _cmake_source_dirs 0)
- list(REMOVE_AT _cmake_binary_dirs 0)
-endmacro(GR_INCLUDE_SUBDIRECTORY)
-
-########################################################################
-# Check if a compiler flag works and conditionally set a compile define.
-# - flag the compiler flag to check for
-# - have the variable to set with result
-########################################################################
-macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have)
- include(CheckCXXCompilerFlag)
- CHECK_CXX_COMPILER_FLAG(${flag} ${have})
- if(${have})
- if(${CMAKE_VERSION} VERSION_GREATER "2.8.4")
- STRING(FIND "${CMAKE_CXX_FLAGS}" "${flag}" flag_dup)
- if(${flag_dup} EQUAL -1)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
- endif(${flag_dup} EQUAL -1)
- endif(${CMAKE_VERSION} VERSION_GREATER "2.8.4")
- endif(${have})
-endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE)
-
-########################################################################
-# Generates the .la libtool file
-# This appears to generate libtool files that cannot be used by auto*.
-# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest])
-# Notice: there is not COMPONENT option, these will not get distributed.
-########################################################################
-function(GR_LIBTOOL)
- if(NOT DEFINED GENERATE_LIBTOOL)
- set(GENERATE_LIBTOOL OFF) #disabled by default
- endif()
-
- if(GENERATE_LIBTOOL)
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN})
-
- find_program(LIBTOOL libtool)
- if(LIBTOOL)
- include(CMakeMacroLibtoolFile)
- CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION})
- endif(LIBTOOL)
- endif(GENERATE_LIBTOOL)
-
-endfunction(GR_LIBTOOL)
-
-########################################################################
-# Do standard things to the library target
-# - set target properties
-# - make install rules
-# Also handle gnuradio custom naming conventions w/ extras mode.
-########################################################################
-function(GR_LIBRARY_FOO target)
- #parse the arguments for component names
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN})
-
- #set additional target properties
- set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER})
-
- #install the generated files like so...
- install(TARGETS ${target}
- LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file
- ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file
- RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file
- )
-
- #extras mode enabled automatically on linux
- if(NOT DEFINED LIBRARY_EXTRAS)
- set(LIBRARY_EXTRAS ${LINUX})
- endif()
-
- #special extras mode to enable alternative naming conventions
- if(LIBRARY_EXTRAS)
-
- #create .la file before changing props
- GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR})
-
- #give the library a special name with ultra-zero soversion
- set_target_properties(${target} PROPERTIES OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0")
- set(target_name lib${target}-${LIBVER}.so.0.0.0)
-
- #custom command to generate symlinks
- add_custom_command(
- TARGET ${target}
- POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
- COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
- COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install
- )
-
- #and install the extra symlinks
- install(
- FILES
- ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
- ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
- DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT}
- )
-
- endif(LIBRARY_EXTRAS)
-endfunction(GR_LIBRARY_FOO)
-
-########################################################################
-# Create a dummy custom command that depends on other targets.
-# Usage:
-#   GR_GEN_TARGET_DEPS(unique_name target_deps <target1> <target2> ...)
-#   ADD_CUSTOM_COMMAND(<the usual args> ${target_deps})
-#
-# Custom command cant depend on targets, but can depend on executables,
-# and executables can depend on targets. So this is the process:
-########################################################################
-function(GR_GEN_TARGET_DEPS name var)
- file(
- WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
- "int main(void){return 0;}\n"
- )
- execute_process(
- COMMAND ${CMAKE_COMMAND} -E copy_if_different
- ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
- ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp
- )
- add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp)
- if(ARGN)
- add_dependencies(${name} ${ARGN})
- endif(ARGN)
-
- if(CMAKE_CROSSCOMPILING)
- set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross
- else()
- set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE)
- endif()
-endfunction(GR_GEN_TARGET_DEPS)
-
-########################################################################
-# Control use of gr_logger
-# Usage:
-# GR_LOGGING()
-#
-# Will set ENABLE_GR_LOG to 1 by default.
-# Can manually set with -DENABLE_GR_LOG=0|1
-########################################################################
-function(GR_LOGGING)
- find_package(Log4cpp)
-
- OPTION(ENABLE_GR_LOG "Use gr_logger" ON)
- if(ENABLE_GR_LOG)
- # If gr_logger is enabled, make it usable
- add_definitions( -DENABLE_GR_LOG )
-
- # also test LOG4CPP; if we have it, use this version of the logger
- # otherwise, default to the stdout/stderr model.
- if(LOG4CPP_FOUND)
- SET(HAVE_LOG4CPP True CACHE INTERNAL "" FORCE)
- add_definitions( -DHAVE_LOG4CPP )
- else(not LOG4CPP_FOUND)
- SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE)
- SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE)
- SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE)
- SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE)
- endif(LOG4CPP_FOUND)
-
- SET(ENABLE_GR_LOG ${ENABLE_GR_LOG} CACHE INTERNAL "" FORCE)
-
- else(ENABLE_GR_LOG)
- SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE)
- SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE)
- SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE)
- SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE)
- endif(ENABLE_GR_LOG)
-
- message(STATUS "ENABLE_GR_LOG set to ${ENABLE_GR_LOG}.")
- message(STATUS "HAVE_LOG4CPP set to ${HAVE_LOG4CPP}.")
- message(STATUS "LOG4CPP_LIBRARIES set to ${LOG4CPP_LIBRARIES}.")
-
-endfunction(GR_LOGGING)
-
-########################################################################
-# Run GRCC to compile .grc files into .py files.
-#
-# Usage: GRCC(filename, directory)
-# - filenames: List of file name of .grc file
-# - directory: directory of built .py file - usually in
-# ${CMAKE_CURRENT_BINARY_DIR}
-# - Sets PYFILES: output converted GRC file names to Python files.
-########################################################################
-function(GRCC)
- # Extract directory from list of args, remove it for the list of filenames.
- list(GET ARGV -1 directory)
- list(REMOVE_AT ARGV -1)
- set(filenames ${ARGV})
- file(MAKE_DIRECTORY ${directory})
-
- SET(GRCC_COMMAND ${CMAKE_SOURCE_DIR}/gr-utils/python/grcc)
-
- # GRCC uses some stuff in grc and gnuradio-runtime, so we force
- # the known paths here
- list(APPEND PYTHONPATHS
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/gnuradio-runtime/python
- ${CMAKE_SOURCE_DIR}/gnuradio-runtime/lib/swig
- ${CMAKE_BINARY_DIR}/gnuradio-runtime/lib/swig
- )
-
- if(WIN32)
- #SWIG generates the python library files into a subdirectory.
- #Therefore, we must append this subdirectory into PYTHONPATH.
- #Only do this for the python directories matching the following:
- foreach(pydir ${PYTHONPATHS})
- get_filename_component(name ${pydir} NAME)
- if(name MATCHES "^(swig|lib|src)$")
- list(APPEND PYTHONPATHS ${pydir}/${CMAKE_BUILD_TYPE})
- endif()
- endforeach(pydir)
- endif(WIN32)
-
- file(TO_NATIVE_PATH "${PYTHONPATHS}" pypath)
-
- if(UNIX)
- list(APPEND pypath "$PYTHONPATH")
- string(REPLACE ";" ":" pypath "${pypath}")
- set(ENV{PYTHONPATH} ${pypath})
- endif(UNIX)
-
- if(WIN32)
- list(APPEND pypath "%PYTHONPATH%")
- string(REPLACE ";" "\\;" pypath "${pypath}")
- #list(APPEND environs "PYTHONPATH=${pypath}")
- set(ENV{PYTHONPATH} ${pypath})
- endif(WIN32)
-
- foreach(f ${filenames})
- execute_process(
- COMMAND ${GRCC_COMMAND} -d ${directory} ${f}
- )
- string(REPLACE ".grc" ".py" pyfile "${f}")
- string(REPLACE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" pyfile "${pyfile}")
- list(APPEND pyfiles ${pyfile})
- endforeach(f)
-
- set(PYFILES ${pyfiles} PARENT_SCOPE)
-endfunction(GRCC)
-
-########################################################################
-# Check if HAVE_PTHREAD_SETSCHEDPARAM and HAVE_SCHED_SETSCHEDULER
-# should be defined
-########################################################################
-macro(GR_CHECK_LINUX_SCHED_AVAIL)
-set(CMAKE_REQUIRED_LIBRARIES -lpthread)
- CHECK_CXX_SOURCE_COMPILES("
-        #include <pthread.h>
- int main(){
- pthread_t pthread;
- pthread_setschedparam(pthread, 0, 0);
- return 0;
- } " HAVE_PTHREAD_SETSCHEDPARAM
- )
- GR_ADD_COND_DEF(HAVE_PTHREAD_SETSCHEDPARAM)
-
- CHECK_CXX_SOURCE_COMPILES("
-        #include <sched.h>
- int main(){
- pid_t pid;
- sched_setscheduler(pid, 0, 0);
- return 0;
- } " HAVE_SCHED_SETSCHEDULER
- )
- GR_ADD_COND_DEF(HAVE_SCHED_SETSCHEDULER)
-endmacro(GR_CHECK_LINUX_SCHED_AVAIL)
-
-########################################################################
-# Macros to generate source and header files from template
-########################################################################
-macro(GR_EXPAND_X_H component root)
-
- include(GrPython)
-
- file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
-"#!${PYTHON_EXECUTABLE}
-
-import sys, os, re
-sys.path.append('${GR_RUNTIME_PYTHONPATH}')
-sys.path.append('${CMAKE_SOURCE_DIR}/python')
-os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}'
-os.chdir('${CMAKE_CURRENT_BINARY_DIR}')
-
-if __name__ == '__main__':
- import build_utils
- root, inp = sys.argv[1:3]
- for sig in sys.argv[3:]:
- name = re.sub ('X+', sig, root)
- d = build_utils.standard_dict2(name, sig, '${component}')
- build_utils.expand_template(d, inp)
-")
-
- #make a list of all the generated headers
- unset(expanded_files_h)
- foreach(sig ${ARGN})
- string(REGEX REPLACE "X+" ${sig} name ${root})
- list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h)
- endforeach(sig)
- unset(name)
-
- #create a command to generate the headers
- add_custom_command(
- OUTPUT ${expanded_files_h}
- DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
- ${root} ${root}.h.t ${ARGN}
- )
-
- #install rules for the generated headers
- list(APPEND generated_includes ${expanded_files_h})
-
-endmacro(GR_EXPAND_X_H)
-
-macro(GR_EXPAND_X_CC_H component root)
-
- include(GrPython)
-
- file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
-"#!${PYTHON_EXECUTABLE}
-
-import sys, os, re
-sys.path.append('${GR_RUNTIME_PYTHONPATH}')
-sys.path.append('${CMAKE_SOURCE_DIR}/python')
-os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}'
-os.chdir('${CMAKE_CURRENT_BINARY_DIR}')
-
-if __name__ == '__main__':
- import build_utils
- root, inp = sys.argv[1:3]
- for sig in sys.argv[3:]:
- name = re.sub ('X+', sig, root)
- d = build_utils.standard_impl_dict2(name, sig, '${component}')
- build_utils.expand_template(d, inp)
-")
-
- #make a list of all the generated files
- unset(expanded_files_cc)
- unset(expanded_files_h)
- foreach(sig ${ARGN})
- string(REGEX REPLACE "X+" ${sig} name ${root})
- list(APPEND expanded_files_cc ${CMAKE_CURRENT_BINARY_DIR}/${name}.cc)
- list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h)
- endforeach(sig)
- unset(name)
-
- #create a command to generate the source files
- add_custom_command(
- OUTPUT ${expanded_files_cc}
- DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.cc.t
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
- ${root} ${root}.cc.t ${ARGN}
- )
-
- #create a command to generate the header files
- add_custom_command(
- OUTPUT ${expanded_files_h}
- DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
- ${root} ${root}.h.t ${ARGN}
- )
-
- #make source files depends on headers to force generation
- set_source_files_properties(${expanded_files_cc}
- PROPERTIES OBJECT_DEPENDS "${expanded_files_h}"
- )
-
- #install rules for the generated files
- list(APPEND generated_sources ${expanded_files_cc})
- list(APPEND generated_headers ${expanded_files_h})
-
-endmacro(GR_EXPAND_X_CC_H)
-
-macro(GR_EXPAND_X_CC_H_IMPL component root)
-
- include(GrPython)
-
- file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
-"#!${PYTHON_EXECUTABLE}
-
-import sys, os, re
-sys.path.append('${GR_RUNTIME_PYTHONPATH}')
-sys.path.append('${CMAKE_SOURCE_DIR}/python')
-os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}'
-os.chdir('${CMAKE_CURRENT_BINARY_DIR}')
-
-if __name__ == '__main__':
- import build_utils
- root, inp = sys.argv[1:3]
- for sig in sys.argv[3:]:
- name = re.sub ('X+', sig, root)
- d = build_utils.standard_dict(name, sig, '${component}')
- build_utils.expand_template(d, inp, '_impl')
-")
-
- #make a list of all the generated files
- unset(expanded_files_cc_impl)
- unset(expanded_files_h_impl)
- unset(expanded_files_h)
- foreach(sig ${ARGN})
- string(REGEX REPLACE "X+" ${sig} name ${root})
- list(APPEND expanded_files_cc_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.cc)
- list(APPEND expanded_files_h_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.h)
- list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/../include/gnuradio/${component}/${name}.h)
- endforeach(sig)
- unset(name)
-
- #create a command to generate the _impl.cc files
- add_custom_command(
- OUTPUT ${expanded_files_cc_impl}
- DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.cc.t
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
- ${root} ${root}_impl.cc.t ${ARGN}
- )
-
- #create a command to generate the _impl.h files
- add_custom_command(
- OUTPUT ${expanded_files_h_impl}
- DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.h.t
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
- ${root} ${root}_impl.h.t ${ARGN}
- )
-
- #make _impl.cc source files depend on _impl.h to force generation
- set_source_files_properties(${expanded_files_cc_impl}
- PROPERTIES OBJECT_DEPENDS "${expanded_files_h_impl}"
- )
-
- #make _impl.h source files depend on headers to force generation
- set_source_files_properties(${expanded_files_h_impl}
- PROPERTIES OBJECT_DEPENDS "${expanded_files_h}"
- )
-
- #install rules for the generated files
- list(APPEND generated_sources ${expanded_files_cc_impl})
- list(APPEND generated_headers ${expanded_files_h_impl})
-
-endmacro(GR_EXPAND_X_CC_H_IMPL)
diff --git a/cmake/Modules/GrPlatform.cmake b/cmake/Modules/GrPlatform.cmake
deleted file mode 100644
index 00a53d0..0000000
--- a/cmake/Modules/GrPlatform.cmake
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_PLATFORM_CMAKE TRUE)
-
-########################################################################
-# Setup additional defines for OS types
-########################################################################
-if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
- set(LINUX TRUE)
-endif()
-
-if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/debian_version")
- set(DEBIAN TRUE)
-endif()
-
-if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/redhat-release")
- set(REDHAT TRUE)
-endif()
-
-if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/slackware-version")
- set(SLACKWARE TRUE)
-endif()
-
-########################################################################
-# when the library suffix should be 64 (applies to redhat linux family)
-########################################################################
-if (REDHAT OR SLACKWARE)
- set(LIB64_CONVENTION TRUE)
-endif()
-
-if(NOT DEFINED LIB_SUFFIX AND LIB64_CONVENTION AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$")
- set(LIB_SUFFIX 64)
-endif()
-
-########################################################################
-# Detect /lib versus /lib64
-########################################################################
-if (CMAKE_INSTALL_LIBDIR MATCHES lib64)
- set(LIB_SUFFIX 64)
-endif()
-
-set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix")
diff --git a/cmake/Modules/GrPython.cmake b/cmake/Modules/GrPython.cmake
deleted file mode 100644
index 06e061e..0000000
--- a/cmake/Modules/GrPython.cmake
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_PYTHON_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_PYTHON_CMAKE TRUE)
-
-########################################################################
-# Setup the python interpreter:
-# This allows the user to specify a specific interpreter,
-# or finds the interpreter via the built-in cmake module.
-########################################################################
-#this allows the user to override PYTHON_EXECUTABLE
-if(PYTHON_EXECUTABLE)
-
- set(PYTHONINTERP_FOUND TRUE)
-
-#otherwise if not set, try to automatically find it
-else(PYTHON_EXECUTABLE)
-
- #use the built-in find script
- find_package(PythonInterp 2)
-
- #and if that fails use the find program routine
- if(NOT PYTHONINTERP_FOUND)
- find_program(PYTHON_EXECUTABLE NAMES python python2 python2.7 python2.6 python2.5)
- if(PYTHON_EXECUTABLE)
- set(PYTHONINTERP_FOUND TRUE)
- endif(PYTHON_EXECUTABLE)
- endif(NOT PYTHONINTERP_FOUND)
-
-endif(PYTHON_EXECUTABLE)
-
-if (CMAKE_CROSSCOMPILING)
- set(QA_PYTHON_EXECUTABLE "/usr/bin/python")
-else (CMAKE_CROSSCOMPILING)
- set(QA_PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE})
-endif(CMAKE_CROSSCOMPILING)
-
-#make the path to the executable appear in the cmake gui
-set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter")
-set(QA_PYTHON_EXECUTABLE ${QA_PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter for QA tests")
-
-#make sure we can use -B with python (introduced in 2.6)
-if(PYTHON_EXECUTABLE)
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -B -c ""
- OUTPUT_QUIET ERROR_QUIET
- RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT
- )
- if(PYTHON_HAS_DASH_B_RESULT EQUAL 0)
- set(PYTHON_DASH_B "-B")
- endif()
-endif(PYTHON_EXECUTABLE)
-
-########################################################################
-# Check for the existence of a python module:
-# - desc a string description of the check
-# - mod the name of the module to import
-# - cmd an additional command to run
-# - have the result variable to set
-########################################################################
-macro(GR_PYTHON_CHECK_MODULE desc mod cmd have)
- message(STATUS "")
- message(STATUS "Python checking for ${desc}")
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -c "
-#########################################
-try:
- import ${mod}
- assert ${cmd}
-except ImportError, AssertionError: exit(-1)
-except: pass
-#########################################"
- RESULT_VARIABLE ${have}
- )
- if(${have} EQUAL 0)
- message(STATUS "Python checking for ${desc} - found")
- set(${have} TRUE)
- else(${have} EQUAL 0)
- message(STATUS "Python checking for ${desc} - not found")
- set(${have} FALSE)
- endif(${have} EQUAL 0)
-endmacro(GR_PYTHON_CHECK_MODULE)
-
-########################################################################
-# Sets the python installation directory GR_PYTHON_DIR
-########################################################################
-if(NOT DEFINED GR_PYTHON_DIR)
-execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "
-from distutils import sysconfig
-print sysconfig.get_python_lib(plat_specific=True, prefix='')
-" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE
-)
-endif()
-file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR)
-
-########################################################################
-# Create an always-built target with a unique name
-# Usage: GR_UNIQUE_TARGET(<description> <dependencies list>)
-########################################################################
-function(GR_UNIQUE_TARGET desc)
- file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR})
- execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib
-unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5]
-print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))"
- OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE)
- add_custom_target(${_target} ALL DEPENDS ${ARGN})
-endfunction(GR_UNIQUE_TARGET)
-
-########################################################################
-# Install python sources (also builds and installs byte-compiled python)
-########################################################################
-function(GR_PYTHON_INSTALL)
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN})
-
- ####################################################################
- if(GR_PYTHON_INSTALL_FILES)
- ####################################################################
- install(${ARGN}) #installs regular python files
-
- #create a list of all generated files
- unset(pysrcfiles)
- unset(pycfiles)
- unset(pyofiles)
- foreach(pyfile ${GR_PYTHON_INSTALL_FILES})
- get_filename_component(pyfile ${pyfile} ABSOLUTE)
- list(APPEND pysrcfiles ${pyfile})
-
- #determine if this file is in the source or binary directory
- file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile})
- string(LENGTH "${source_rel_path}" source_rel_path_len)
- file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile})
- string(LENGTH "${binary_rel_path}" binary_rel_path_len)
-
- #and set the generated path appropriately
- if(${source_rel_path_len} GREATER ${binary_rel_path_len})
- set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path})
- else()
- set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path})
- endif()
- list(APPEND pycfiles ${pygenfile}c)
- list(APPEND pyofiles ${pygenfile}o)
-
- #ensure generation path exists
- get_filename_component(pygen_path ${pygenfile} PATH)
- file(MAKE_DIRECTORY ${pygen_path})
-
- endforeach(pyfile)
-
- #the command to generate the pyc files
- add_custom_command(
- DEPENDS ${pysrcfiles} OUTPUT ${pycfiles}
- COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles}
- )
-
- #the command to generate the pyo files
- add_custom_command(
- DEPENDS ${pysrcfiles} OUTPUT ${pyofiles}
- COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles}
- )
-
- #create install rule and add generated files to target list
- set(python_install_gen_targets ${pycfiles} ${pyofiles})
- install(FILES ${python_install_gen_targets}
- DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
- COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
- )
-
- ####################################################################
- elseif(GR_PYTHON_INSTALL_PROGRAMS)
- ####################################################################
- file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native)
-
- if (CMAKE_CROSSCOMPILING)
- set(pyexe_native "/usr/bin/env python")
- endif()
-
- foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS})
- get_filename_component(pyfile_name ${pyfile} NAME)
- get_filename_component(pyfile ${pyfile} ABSOLUTE)
- string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe")
- list(APPEND python_install_gen_targets ${pyexefile})
-
- get_filename_component(pyexefile_path ${pyexefile} PATH)
- file(MAKE_DIRECTORY ${pyexefile_path})
-
- add_custom_command(
- OUTPUT ${pyexefile} DEPENDS ${pyfile}
- COMMAND ${PYTHON_EXECUTABLE} -c
- "import re; R=re.compile('^\#!.*$\\n',flags=re.MULTILINE); open('${pyexefile}','w').write('\#!${pyexe_native}\\n'+R.sub('',open('${pyfile}','r').read()))"
- COMMENT "Shebangin ${pyfile_name}"
- VERBATIM
- )
-
- #on windows, python files need an extension to execute
- get_filename_component(pyfile_ext ${pyfile} EXT)
- if(WIN32 AND NOT pyfile_ext)
- set(pyfile_name "${pyfile_name}.py")
- endif()
-
- install(PROGRAMS ${pyexefile} RENAME ${pyfile_name}
- DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
- COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
- )
- endforeach(pyfile)
-
- endif()
-
- GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets})
-
-endfunction(GR_PYTHON_INSTALL)
-
-########################################################################
-# Write the python helper script that generates byte code files
-########################################################################
-file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py "
-import sys, py_compile
-files = sys.argv[1:]
-srcs, gens = files[:len(files)/2], files[len(files)/2:]
-for src, gen in zip(srcs, gens):
- py_compile.compile(file=src, cfile=gen, doraise=True)
-")
diff --git a/cmake/Modules/GrSwig.cmake b/cmake/Modules/GrSwig.cmake
deleted file mode 100644
index 33f37d2..0000000
--- a/cmake/Modules/GrSwig.cmake
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_SWIG_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_SWIG_CMAKE TRUE)
-
-include(GrPython)
-
-########################################################################
-# Builds a swig documentation file to be generated into python docstrings
-# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....)
-#
-# Set the following variable to specify extra dependent targets:
-# - GR_SWIG_DOCS_SOURCE_DEPS
-# - GR_SWIG_DOCS_TARGET_DEPS
-########################################################################
-function(GR_SWIG_MAKE_DOCS output_file)
- if(ENABLE_DOXYGEN)
-
- #setup the input files variable list, quote formated
- set(input_files)
- unset(INPUT_PATHS)
- foreach(input_path ${ARGN})
- if(IS_DIRECTORY ${input_path}) #when input path is a directory
- file(GLOB input_path_h_files ${input_path}/*.h)
- else() #otherwise its just a file, no glob
- set(input_path_h_files ${input_path})
- endif()
- list(APPEND input_files ${input_path_h_files})
- set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"")
- endforeach(input_path)
-
- #determine the output directory
- get_filename_component(name ${output_file} NAME_WE)
- get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH)
- set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs)
- make_directory(${OUTPUT_DIRECTORY})
-
- #generate the Doxyfile used by doxygen
- configure_file(
- ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in
- ${OUTPUT_DIRECTORY}/Doxyfile
- @ONLY)
-
- #Create a dummy custom command that depends on other targets
- include(GrMiscUtils)
- GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS})
-
- #call doxygen on the Doxyfile + input headers
- add_custom_command(
- OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml
- DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps}
- COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile
- COMMENT "Generating doxygen xml for ${name} docs"
- )
-
- #call the swig_doc script on the xml files
- add_custom_command(
- OUTPUT ${output_file}
- DEPENDS ${input_files} ${stamp-file} ${OUTPUT_DIRECTORY}/xml/index.xml
- COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
- ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py
- ${OUTPUT_DIRECTORY}/xml
- ${output_file}
- COMMENT "Generating python docstrings for ${name}"
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen
- )
-
- else(ENABLE_DOXYGEN)
- file(WRITE ${output_file} "\n") #no doxygen -> empty file
- endif(ENABLE_DOXYGEN)
-endfunction(GR_SWIG_MAKE_DOCS)
-
-########################################################################
-# Build a swig target for the common gnuradio use case. Usage:
-# GR_SWIG_MAKE(target ifile ifile ifile...)
-#
-# Set the following variables before calling:
-# - GR_SWIG_FLAGS
-# - GR_SWIG_INCLUDE_DIRS
-# - GR_SWIG_LIBRARIES
-# - GR_SWIG_SOURCE_DEPS
-# - GR_SWIG_TARGET_DEPS
-# - GR_SWIG_DOC_FILE
-# - GR_SWIG_DOC_DIRS
-########################################################################
-macro(GR_SWIG_MAKE name)
- set(ifiles ${ARGN})
-
- # Shimming this in here to take care of a SWIG bug with handling
- # vector<size_t> and vector<unsigned int> (on 32-bit machines) and
- # vector<long unsigned int> (on 64-bit machines). Use this to test
- # the size of size_t, then set SIZE_T_32 if it's a 32-bit machine
- # or not if it's 64-bit. The logic in gr_type.i handles the rest.
- INCLUDE(CheckTypeSize)
- CHECK_TYPE_SIZE("size_t" SIZEOF_SIZE_T)
- CHECK_TYPE_SIZE("unsigned int" SIZEOF_UINT)
- if(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT})
- list(APPEND GR_SWIG_FLAGS -DSIZE_T_32)
- endif(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT})
-
- #do swig doc generation if specified
- if(GR_SWIG_DOC_FILE)
- set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS})
- list(APPEND GR_SWIG_DOCS_TARGET_DEPS ${GR_SWIG_TARGET_DEPS})
- GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS})
- add_custom_target(${name}_swig_doc DEPENDS ${GR_SWIG_DOC_FILE})
- list(APPEND GR_SWIG_TARGET_DEPS ${name}_swig_doc ${GR_RUNTIME_SWIG_DOC_FILE})
- endif()
-
- #append additional include directories
- find_package(PythonLibs 2)
- list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs)
- list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS})
-
- #prepend local swig directories
- list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_SOURCE_DIR})
- list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_BINARY_DIR})
-
- #determine include dependencies for swig file
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE}
- ${CMAKE_BINARY_DIR}/get_swig_deps.py
- "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}"
- OUTPUT_STRIP_TRAILING_WHITESPACE
- OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- )
-
- #Create a dummy custom command that depends on other targets
- include(GrMiscUtils)
- GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS})
- set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag)
- add_custom_command(
- OUTPUT ${tag_file}
- DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps}
- COMMAND ${CMAKE_COMMAND} -E touch ${tag_file}
- )
-
- #append the specified include directories
- include_directories(${GR_SWIG_INCLUDE_DIRS})
- list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file})
-
- #setup the swig flags with flags and include directories
- set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS})
- foreach(dir ${GR_SWIG_INCLUDE_DIRS})
- list(APPEND CMAKE_SWIG_FLAGS "-I${dir}")
- endforeach(dir)
-
- #set the C++ property on the swig .i file so it builds
- set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON)
-
- #setup the actual swig library target to be built
- include(UseSWIG)
- SWIG_ADD_MODULE(${name} python ${ifiles})
- if(APPLE)
- set(PYTHON_LINK_OPTIONS "-undefined dynamic_lookup")
- else()
- set(PYTHON_LINK_OPTIONS ${PYTHON_LIBRARIES})
- endif(APPLE)
- SWIG_LINK_LIBRARIES(${name} ${PYTHON_LINK_OPTIONS} ${GR_SWIG_LIBRARIES})
- if(${name} STREQUAL "runtime_swig")
- SET_TARGET_PROPERTIES(${SWIG_MODULE_runtime_swig_REAL_NAME} PROPERTIES DEFINE_SYMBOL "gnuradio_runtime_EXPORTS")
- endif(${name} STREQUAL "runtime_swig")
-
-endmacro(GR_SWIG_MAKE)
-
-########################################################################
-# Install swig targets generated by GR_SWIG_MAKE. Usage:
-# GR_SWIG_INSTALL(
-# TARGETS target target target...
-# [DESTINATION destination]
-# [COMPONENT component]
-# )
-########################################################################
-macro(GR_SWIG_INSTALL)
-
- include(CMakeParseArgumentsCopy)
- CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN})
-
- foreach(name ${GR_SWIG_INSTALL_TARGETS})
- install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME}
- DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
- COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
- )
-
- include(GrPython)
- GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py
- DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
- COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
- )
-
- GR_LIBTOOL(
- TARGET ${SWIG_MODULE_${name}_REAL_NAME}
- DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
- )
-
- endforeach(name)
-
-endmacro(GR_SWIG_INSTALL)
-
-########################################################################
-# Generate a python file that can determine swig dependencies.
-# Used by the make macro above to determine extra dependencies.
-# When you build C++, CMake figures out the header dependencies.
-# This code essentially performs that logic for swig includes.
-########################################################################
-file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py "
-
-import os, sys, re
-
-i_include_matcher = re.compile('%(include|import)\\s*[<|\"](.*)[>|\"]')
-h_include_matcher = re.compile('#(include)\\s*[<|\"](.*)[>|\"]')
-include_dirs = sys.argv[2].split(';')
-
-def get_swig_incs(file_path):
- if file_path.endswith('.i'): matcher = i_include_matcher
- else: matcher = h_include_matcher
- file_contents = open(file_path, 'r').read()
- return matcher.findall(file_contents, re.MULTILINE)
-
-def get_swig_deps(file_path, level):
- deps = [file_path]
- if level == 0: return deps
- for keyword, inc_file in get_swig_incs(file_path):
- for inc_dir in include_dirs:
- inc_path = os.path.join(inc_dir, inc_file)
- if not os.path.exists(inc_path): continue
- deps.extend(get_swig_deps(inc_path, level-1))
- break #found, we dont search in lower prio inc dirs
- return deps
-
-if __name__ == '__main__':
- ifiles = sys.argv[1].split(';')
- deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], [])
- #sys.stderr.write(';'.join(set(deps)) + '\\n\\n')
- print(';'.join(set(deps)))
-")
diff --git a/cmake/Modules/GrTest.cmake b/cmake/Modules/GrTest.cmake
deleted file mode 100644
index 62caab4..0000000
--- a/cmake/Modules/GrTest.cmake
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2010-2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-if(DEFINED __INCLUDED_GR_TEST_CMAKE)
- return()
-endif()
-set(__INCLUDED_GR_TEST_CMAKE TRUE)
-
-########################################################################
-# Add a unit test and setup the environment for a unit test.
-# Takes the same arguments as the ADD_TEST function.
-#
-# Before calling set the following variables:
-# GR_TEST_TARGET_DEPS - built targets for the library path
-# GR_TEST_LIBRARY_DIRS - directories for the library path
-# GR_TEST_PYTHON_DIRS - directories for the python path
-# GR_TEST_ENVIRONS - other environment key/value pairs
-########################################################################
-function(GR_ADD_TEST test_name)
-
- #Ensure that the build exe also appears in the PATH.
- list(APPEND GR_TEST_TARGET_DEPS ${ARGN})
-
- #In the land of windows, all libraries must be in the PATH.
- #Since the dependent libraries are not yet installed,
- #we must manually set them in the PATH to run tests.
- #The following appends the path of a target dependency.
- foreach(target ${GR_TEST_TARGET_DEPS})
- get_target_property(location ${target} LOCATION)
- if(location)
- get_filename_component(path ${location} PATH)
- string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path})
- list(APPEND GR_TEST_LIBRARY_DIRS ${path})
- endif(location)
- endforeach(target)
-
- if(WIN32)
- #SWIG generates the python library files into a subdirectory.
- #Therefore, we must append this subdirectory into PYTHONPATH.
- #Only do this for the python directories matching the following:
- foreach(pydir ${GR_TEST_PYTHON_DIRS})
- get_filename_component(name ${pydir} NAME)
- if(name MATCHES "^(swig|lib|src)$")
- list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE})
- endif()
- endforeach(pydir)
- endif(WIN32)
-
- file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir)
- file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list?
- file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list?
-
- set(environs "VOLK_GENERIC=1" "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}")
- list(APPEND environs ${GR_TEST_ENVIRONS})
-
- #http://www.cmake.org/pipermail/cmake/2009-May/029464.html
- #Replaced this add test + set environs code with the shell script generation.
- #Its nicer to be able to manually run the shell script to diagnose problems.
- #ADD_TEST(${ARGV})
- #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}")
-
- if(UNIX)
- set(LD_PATH_VAR "LD_LIBRARY_PATH")
- if(APPLE)
- set(LD_PATH_VAR "DYLD_LIBRARY_PATH")
- endif()
-
- set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH")
- list(APPEND libpath "$${LD_PATH_VAR}")
- list(APPEND pypath "$PYTHONPATH")
-
- #replace list separator with the path separator
- string(REPLACE ";" ":" libpath "${libpath}")
- string(REPLACE ";" ":" pypath "${pypath}")
- list(APPEND environs "PATH=${binpath}" "${LD_PATH_VAR}=${libpath}" "PYTHONPATH=${pypath}")
-
- #generate a shell script that sets the environment and runs the test
- if (CMAKE_CROSSCOMPILING)
- set(SHELL "/bin/sh")
- else(CMAKE_CROSSCOMPILING)
- find_program(SHELL sh)
- endif(CMAKE_CROSSCOMPILING)
- set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh)
- file(WRITE ${sh_file} "#!${SHELL}\n")
- #each line sets an environment variable
- foreach(environ ${environs})
- file(APPEND ${sh_file} "export ${environ}\n")
- endforeach(environ)
- #load the command to run with its arguments
- foreach(arg ${ARGN})
- file(APPEND ${sh_file} "${arg} ")
- endforeach(arg)
- file(APPEND ${sh_file} "\n")
-
- #make the shell file executable
- execute_process(COMMAND chmod +x ${sh_file})
-
- add_test(${test_name} ${SHELL} ${sh_file})
-
- endif(UNIX)
-
- if(WIN32)
- list(APPEND libpath ${DLL_PATHS} "%PATH%")
- list(APPEND pypath "%PYTHONPATH%")
-
- #replace list separator with the path separator (escaped)
- string(REPLACE ";" "\\;" libpath "${libpath}")
- string(REPLACE ";" "\\;" pypath "${pypath}")
- list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}")
-
- #generate a bat file that sets the environment and runs the test
- set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat)
- file(WRITE ${bat_file} "@echo off\n")
- #each line sets an environment variable
- foreach(environ ${environs})
- file(APPEND ${bat_file} "SET ${environ}\n")
- endforeach(environ)
- #load the command to run with its arguments
- foreach(arg ${ARGN})
- file(APPEND ${bat_file} "${arg} ")
- endforeach(arg)
- file(APPEND ${bat_file} "\n")
-
- add_test(${test_name} ${bat_file})
- endif(WIN32)
-
-endfunction(GR_ADD_TEST)
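GR_ADD_TEST, also removed here, wrapped add_test() in a generated shell (or batch) script so the library and Python paths point at the build tree. A minimal sketch of how it was typically invoked from python/CMakeLists.txt (the QA file name is hypothetical):

```cmake
# Sketch: registering a Python QA script with the removed GR_ADD_TEST helper.
include(GrTest)

set(GR_TEST_TARGET_DEPS gnuradio-verilog)          # build dirs of these targets go on the library path
set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)  # extra PYTHONPATH entries

GR_ADD_TEST(qa_axi_module ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_axi_module.py)
```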
diff --git a/cmake/Modules/UseSWIG.cmake b/cmake/Modules/UseSWIG.cmake
deleted file mode 100644
index c0f1728..0000000
--- a/cmake/Modules/UseSWIG.cmake
+++ /dev/null
@@ -1,304 +0,0 @@
-# - SWIG module for CMake
-# Defines the following macros:
-# SWIG_ADD_MODULE(name language [ files ])
-# - Define swig module with given name and specified language
-# SWIG_LINK_LIBRARIES(name [ libraries ])
-# - Link libraries to swig module
-# All other macros are for internal use only.
-# To get the actual name of the swig module,
-# use: ${SWIG_MODULE_${name}_REAL_NAME}.
-# Set Source files properties such as CPLUSPLUS and SWIG_FLAGS to specify
-# special behavior of SWIG. Also global CMAKE_SWIG_FLAGS can be used to add
-# special flags to all swig calls.
-# Another special variable is CMAKE_SWIG_OUTDIR, it allows one to specify
-# where to write all the swig generated module (swig -outdir option)
-# The name-specific variable SWIG_MODULE_<name>_EXTRA_DEPS may be used
-# to specify extra dependencies for the generated modules.
-# If the source file generated by swig need some special flag you can use
-# set_source_files_properties( ${swig_generated_file_fullname}
-# PROPERTIES COMPILE_FLAGS "-bla")
-
-
-#=============================================================================
-# Copyright 2004-2009 Kitware, Inc.
-# Copyright 2009 Mathieu Malaterre
-#
-# Distributed under the OSI-approved BSD License (the "License");
-# see accompanying file Copyright.txt for details.
-#
-# This software is distributed WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the License for more information.
-#=============================================================================
-# (To distribute this file outside of CMake, substitute the full
-# License text for the above reference.)
-
-set(SWIG_CXX_EXTENSION "cxx")
-set(SWIG_EXTRA_LIBRARIES "")
-
-set(SWIG_PYTHON_EXTRA_FILE_EXTENSION "py")
-
-#
-# For given swig module initialize variables associated with it
-#
-macro(SWIG_MODULE_INITIALIZE name language)
- string(TOUPPER "${language}" swig_uppercase_language)
- string(TOLOWER "${language}" swig_lowercase_language)
- set(SWIG_MODULE_${name}_LANGUAGE "${swig_uppercase_language}")
- set(SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG "${swig_lowercase_language}")
-
- set(SWIG_MODULE_${name}_REAL_NAME "${name}")
- if("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "UNKNOWN")
- message(FATAL_ERROR "SWIG Error: Language \"${language}\" not found")
- elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PYTHON")
- # when swig is used without the -interface it will produce in the module.py
- # a 'import _modulename' statement, which implies having a corresponding
- # _modulename.so (*NIX), _modulename.pyd (Win32).
- set(SWIG_MODULE_${name}_REAL_NAME "_${name}")
- elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PERL")
- set(SWIG_MODULE_${name}_EXTRA_FLAGS "-shadow")
- endif()
-endmacro()
-
-#
-# For a given language, input file, and output file, determine extra files that
-# will be generated. This is internal swig macro.
-#
-
-macro(SWIG_GET_EXTRA_OUTPUT_FILES language outfiles generatedpath infile)
- set(${outfiles} "")
- get_source_file_property(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename
- ${infile} SWIG_MODULE_NAME)
- if(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename STREQUAL "NOTFOUND")
- get_filename_component(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename "${infile}" NAME_WE)
- endif()
- foreach(it ${SWIG_${language}_EXTRA_FILE_EXTENSION})
- set(${outfiles} ${${outfiles}}
- "${generatedpath}/${SWIG_GET_EXTRA_OUTPUT_FILES_module_basename}.${it}")
- endforeach()
-endmacro()
-
-#
-# Take swig (*.i) file and add proper custom commands for it
-#
-macro(SWIG_ADD_SOURCE_TO_MODULE name outfiles infile)
- set(swig_full_infile ${infile})
- get_filename_component(swig_source_file_path "${infile}" PATH)
- get_filename_component(swig_source_file_name_we "${infile}" NAME_WE)
- get_source_file_property(swig_source_file_generated ${infile} GENERATED)
- get_source_file_property(swig_source_file_cplusplus ${infile} CPLUSPLUS)
- get_source_file_property(swig_source_file_flags ${infile} SWIG_FLAGS)
- if("${swig_source_file_flags}" STREQUAL "NOTFOUND")
- set(swig_source_file_flags "")
- endif()
- set(swig_source_file_fullname "${infile}")
- if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_SOURCE_DIR}")
- string(REGEX REPLACE
- "^${CMAKE_CURRENT_SOURCE_DIR}" ""
- swig_source_file_relative_path
- "${swig_source_file_path}")
- else()
- if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_BINARY_DIR}")
- string(REGEX REPLACE
- "^${CMAKE_CURRENT_BINARY_DIR}" ""
- swig_source_file_relative_path
- "${swig_source_file_path}")
- set(swig_source_file_generated 1)
- else()
- set(swig_source_file_relative_path "${swig_source_file_path}")
- if(swig_source_file_generated)
- set(swig_source_file_fullname "${CMAKE_CURRENT_BINARY_DIR}/${infile}")
- else()
- set(swig_source_file_fullname "${CMAKE_CURRENT_SOURCE_DIR}/${infile}")
- endif()
- endif()
- endif()
-
- set(swig_generated_file_fullname
- "${CMAKE_CURRENT_BINARY_DIR}")
- if(swig_source_file_relative_path)
- set(swig_generated_file_fullname
- "${swig_generated_file_fullname}/${swig_source_file_relative_path}")
- endif()
- # If CMAKE_SWIG_OUTDIR was specified then pass it to -outdir
- if(CMAKE_SWIG_OUTDIR)
- set(swig_outdir ${CMAKE_SWIG_OUTDIR})
- else()
- set(swig_outdir ${CMAKE_CURRENT_BINARY_DIR})
- endif()
- SWIG_GET_EXTRA_OUTPUT_FILES(${SWIG_MODULE_${name}_LANGUAGE}
- swig_extra_generated_files
- "${swig_outdir}"
- "${infile}")
- set(swig_generated_file_fullname
- "${swig_generated_file_fullname}/${swig_source_file_name_we}")
- # add the language into the name of the file (i.e. TCL_wrap)
- # this allows for the same .i file to be wrapped into different languages
- set(swig_generated_file_fullname
- "${swig_generated_file_fullname}${SWIG_MODULE_${name}_LANGUAGE}_wrap")
-
- if(swig_source_file_cplusplus)
- set(swig_generated_file_fullname
- "${swig_generated_file_fullname}.${SWIG_CXX_EXTENSION}")
- else()
- set(swig_generated_file_fullname
- "${swig_generated_file_fullname}.c")
- endif()
-
- # Shut up some warnings from poor SWIG code generation that we
- # can do nothing about, when this flag is available
- include(CheckCXXCompilerFlag)
- check_cxx_compiler_flag("-Wno-unused-but-set-variable" HAVE_WNO_UNUSED_BUT_SET_VARIABLE)
- if(HAVE_WNO_UNUSED_BUT_SET_VARIABLE)
- set_source_files_properties(${swig_generated_file_fullname}
- PROPERTIES COMPILE_FLAGS "-Wno-unused-but-set-variable")
- endif(HAVE_WNO_UNUSED_BUT_SET_VARIABLE)
-
- get_directory_property(cmake_include_directories INCLUDE_DIRECTORIES)
- set(swig_include_dirs)
- foreach(it ${cmake_include_directories})
- set(swig_include_dirs ${swig_include_dirs} "-I${it}")
- endforeach()
-
- set(swig_special_flags)
- # default is c, so add c++ flag if it is c++
- if(swig_source_file_cplusplus)
- set(swig_special_flags ${swig_special_flags} "-c++")
- endif()
- set(swig_extra_flags)
- if(SWIG_MODULE_${name}_EXTRA_FLAGS)
- set(swig_extra_flags ${swig_extra_flags} ${SWIG_MODULE_${name}_EXTRA_FLAGS})
- endif()
-
- # hack to work around CMake bug in add_custom_command with multiple OUTPUT files
-
- file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR})
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib
-unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5]
-print(re.sub('\\W', '_', '${name} ${reldir} ' + unique))"
- OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE
- )
-
- file(
- WRITE ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in
- "int main(void){return 0;}\n"
- )
-
- # create dummy dependencies
- add_custom_command(
- OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp
- COMMAND ${CMAKE_COMMAND} -E copy
- ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in
- ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp
- DEPENDS "${swig_source_file_fullname}" ${SWIG_MODULE_${name}_EXTRA_DEPS}
- COMMENT ""
- )
-
- # create the dummy target
- add_executable(${_target} ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp)
-
- # add a custom command to the dummy target
- add_custom_command(
- TARGET ${_target}
- # Let's create the ${swig_outdir} at execution time, in case dir contains $(OutDir)
- COMMAND ${CMAKE_COMMAND} -E make_directory ${swig_outdir}
- COMMAND "${SWIG_EXECUTABLE}"
- ARGS "-${SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG}"
- ${swig_source_file_flags}
- ${CMAKE_SWIG_FLAGS}
- -outdir ${swig_outdir}
- ${swig_special_flags}
- ${swig_extra_flags}
- ${swig_include_dirs}
- -o "${swig_generated_file_fullname}"
- "${swig_source_file_fullname}"
- COMMENT "Swig source"
- )
-
- #add dummy independent dependencies from the _target to each file
- #that will be generated by the SWIG command above
-
- set(${outfiles} "${swig_generated_file_fullname}" ${swig_extra_generated_files})
-
- foreach(swig_gen_file ${${outfiles}})
- add_custom_command(
- OUTPUT ${swig_gen_file}
- COMMAND ""
- DEPENDS ${_target}
- COMMENT ""
- )
- endforeach()
-
- set_source_files_properties(
- ${outfiles} PROPERTIES GENERATED 1
- )
-
-endmacro()
-
-#
-# Create Swig module
-#
-macro(SWIG_ADD_MODULE name language)
- SWIG_MODULE_INITIALIZE(${name} ${language})
- set(swig_dot_i_sources)
- set(swig_other_sources)
- foreach(it ${ARGN})
- if(${it} MATCHES ".*\\.i$")
- set(swig_dot_i_sources ${swig_dot_i_sources} "${it}")
- else()
- set(swig_other_sources ${swig_other_sources} "${it}")
- endif()
- endforeach()
-
- set(swig_generated_sources)
- foreach(it ${swig_dot_i_sources})
- SWIG_ADD_SOURCE_TO_MODULE(${name} swig_generated_source ${it})
- set(swig_generated_sources ${swig_generated_sources} "${swig_generated_source}")
- endforeach()
- get_directory_property(swig_extra_clean_files ADDITIONAL_MAKE_CLEAN_FILES)
- set_directory_properties(PROPERTIES
- ADDITIONAL_MAKE_CLEAN_FILES "${swig_extra_clean_files};${swig_generated_sources}")
- add_library(${SWIG_MODULE_${name}_REAL_NAME}
- MODULE
- ${swig_generated_sources}
- ${swig_other_sources})
- string(TOLOWER "${language}" swig_lowercase_language)
- if ("${swig_lowercase_language}" STREQUAL "java")
- if (APPLE)
- # In java you want:
- # System.loadLibrary("LIBRARY");
- # then JNI will look for a library whose name is platform dependent, namely
- # MacOS : libLIBRARY.jnilib
- # Windows: LIBRARY.dll
- # Linux : libLIBRARY.so
- set_target_properties (${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".jnilib")
- endif ()
- endif ()
- if ("${swig_lowercase_language}" STREQUAL "python")
- # this is only needed for the python case where a _modulename.so is generated
- set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES PREFIX "")
- # Python extension modules on Windows must have the extension ".pyd"
- # instead of ".dll" as of Python 2.5. Older python versions do support
- # this suffix.
- # http://docs.python.org/whatsnew/ports.html#SECTION0001510000000000000000
- #
- # Windows: .dll is no longer supported as a filename extension for extension modules.
- # .pyd is now the only filename extension that will be searched for.
- #
- if(WIN32 AND NOT CYGWIN)
- set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".pyd")
- endif()
- endif ()
-endmacro()
-
-#
-# Like TARGET_LINK_LIBRARIES but for swig modules
-#
-macro(SWIG_LINK_LIBRARIES name)
- if(SWIG_MODULE_${name}_REAL_NAME)
- target_link_libraries(${SWIG_MODULE_${name}_REAL_NAME} ${ARGN})
- else()
- message(SEND_ERROR "Cannot find Swig library \"${name}\".")
- endif()
-endmacro()
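The public entry points of the deleted UseSWIG.cmake are SWIG_ADD_MODULE and SWIG_LINK_LIBRARIES, as its header comment states. A minimal, self-contained usage sketch (module and file names are illustrative):

```cmake
# Sketch: building a Python extension from a .i interface file with this module.
include(UseSWIG)

set_source_files_properties(example.i PROPERTIES CPLUSPLUS ON)  # wrap as C++, not C
SWIG_ADD_MODULE(example python example.i)
SWIG_LINK_LIBRARIES(example ${PYTHON_LIBRARIES})

# For Python the real target name is prefixed: ${SWIG_MODULE_example_REAL_NAME} expands to _example
```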
diff --git a/cmake/Modules/verilogConfig.cmake b/cmake/Modules/gnuradio-verilogConfig.cmake
similarity index 56%
rename from cmake/Modules/verilogConfig.cmake
rename to cmake/Modules/gnuradio-verilogConfig.cmake
index fca291a..fc11e46 100644
--- a/cmake/Modules/verilogConfig.cmake
+++ b/cmake/Modules/gnuradio-verilogConfig.cmake
@@ -1,9 +1,10 @@
-INCLUDE(FindPkgConfig)
-PKG_CHECK_MODULES(PC_VERILOG verilog)
+find_package(PkgConfig)
+
+PKG_CHECK_MODULES(PC_GR_VERILOG gnuradio-verilog)
FIND_PATH(
- VERILOG_INCLUDE_DIRS
- NAMES verilog/api.h
+ GR_VERILOG_INCLUDE_DIRS
+ NAMES gnuradio/verilog/api.h
HINTS $ENV{VERILOG_DIR}/include
${PC_VERILOG_INCLUDEDIR}
PATHS ${CMAKE_INSTALL_PREFIX}/include
@@ -12,7 +13,7 @@ FIND_PATH(
)
FIND_LIBRARY(
- VERILOG_LIBRARIES
+ GR_VERILOG_LIBRARIES
NAMES gnuradio-verilog
HINTS $ENV{VERILOG_DIR}/lib
${PC_VERILOG_LIBDIR}
@@ -24,9 +25,8 @@ FIND_LIBRARY(
/usr/lib64
)
-include("${CMAKE_CURRENT_LIST_DIR}/verilogTarget.cmake")
+include("${CMAKE_CURRENT_LIST_DIR}/gnuradio-verilogTarget.cmake")
INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(VERILOG DEFAULT_MSG VERILOG_LIBRARIES VERILOG_INCLUDE_DIRS)
-MARK_AS_ADVANCED(VERILOG_LIBRARIES VERILOG_INCLUDE_DIRS)
-
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GR_VERILOG DEFAULT_MSG GR_VERILOG_LIBRARIES GR_VERILOG_INCLUDE_DIRS)
+MARK_AS_ADVANCED(GR_VERILOG_LIBRARIES GR_VERILOG_INCLUDE_DIRS)
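With the rename above, downstream CMake projects look the library up under the gnuradio-verilog package name and consume the GR_VERILOG_* variables it sets. A hedged consumer sketch (the my_app target and main.cc are hypothetical):

```cmake
# Sketch: consuming the renamed package from an external CMake project.
find_package(gnuradio-verilog REQUIRED)

add_executable(my_app main.cc)
target_include_directories(my_app PRIVATE ${GR_VERILOG_INCLUDE_DIRS})
target_link_libraries(my_app PRIVATE ${GR_VERILOG_LIBRARIES})
```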
diff --git a/cmake/Modules/targetConfig.cmake.in b/cmake/Modules/targetConfig.cmake.in
index 79e4a28..4a1fb31 100644
--- a/cmake/Modules/targetConfig.cmake.in
+++ b/cmake/Modules/targetConfig.cmake.in
@@ -2,20 +2,8 @@
#
# This file is part of GNU Radio
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
include(CMakeFindDependencyMacro)
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index f16fbf6..ba13138 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Setup dependencies
diff --git a/docs/doxygen/CMakeLists.txt b/docs/doxygen/CMakeLists.txt
index 1b44799..543c82e 100644
--- a/docs/doxygen/CMakeLists.txt
+++ b/docs/doxygen/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Create the doxygen configuration file
@@ -28,6 +17,7 @@ file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir)
set(HAVE_DOT ${DOXYGEN_DOT_FOUND})
set(enable_html_docs YES)
set(enable_latex_docs NO)
+set(enable_mathjax NO)
set(enable_xml_docs YES)
configure_file(
diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in
index a3350a4..8e47b79 100644
--- a/docs/doxygen/Doxyfile.in
+++ b/docs/doxygen/Doxyfile.in
@@ -199,13 +199,6 @@ TAB_SIZE = 8
ALIASES =
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST =
-
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
@@ -723,8 +716,6 @@ EXCLUDE_PATTERNS = */.deps/* \
EXCLUDE_SYMBOLS = ad9862 \
numpy \
- *swig* \
- *Swig* \
*my_top_block* \
*my_graph* \
*app_top_block* \
@@ -790,7 +781,7 @@ INPUT_FILTER =
# info on how filters are used. If FILTER_PATTERNS is empty or if
# non of the patterns match the file name, INPUT_FILTER is applied.
-FILTER_PATTERNS = *.py="@top_srcdir@"/doc/doxygen/other/doxypy.py
+FILTER_PATTERNS = *.py=@top_srcdir@/docs/doxygen/other/doxypy.py
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will be used to filter the input files when producing source
@@ -879,12 +870,6 @@ VERBATIM_HEADERS = YES
ALPHABETICAL_INDEX = YES
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
@@ -1220,14 +1205,14 @@ FORMULA_TRANSPARENT = YES
# output. When enabled you may also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.
-USE_MATHJAX = NO
+USE_MATHJAX = @enable_mathjax@
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
# SVG. The default value is HTML-CSS, which is slower, but has the best
# compatibility.
-MATHJAX_FORMAT = HTML-CSS
+MATHJAX_FORMAT = SVG
# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
@@ -1239,12 +1224,12 @@ MATHJAX_FORMAT = HTML-CSS
# However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+MATHJAX_RELPATH = @MATHJAX2_PATH@
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
-MATHJAX_EXTENSIONS =
+MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
# pieces of code that will be used on startup of the MathJax code.
@@ -1680,11 +1665,6 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH = /usr/bin/perl
-
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
@@ -1697,15 +1677,6 @@ PERL_PATH = /usr/bin/perl
CLASS_DIAGRAMS = YES
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
# or is not a class.
@@ -1834,7 +1805,7 @@ DIRECTORY_GRAPH = YES
# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible in IE 9+ (other browsers do not have this requirement).
-DOT_IMAGE_FORMAT = png
+DOT_IMAGE_FORMAT = svg
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
diff --git a/docs/doxygen/Doxyfile.swig_doc.in b/docs/doxygen/Doxyfile.swig_doc.in
deleted file mode 100644
index cbe06d6..0000000
--- a/docs/doxygen/Doxyfile.swig_doc.in
+++ /dev/null
@@ -1,1878 +0,0 @@
-# Doxyfile 1.8.4
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed
-# in front of the TAG it is preceding .
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME = @CPACK_PACKAGE_NAME@
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = "@OUTPUT_DIRECTORY@"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
-# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
-# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip. Note that you specify absolute paths here, but also
-# relative paths, which will be relative from the directory where doxygen is
-# started.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension,
-# and language is one of the parsers supported by doxygen: IDL, Java,
-# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
-# C++. For instance to make doxygen treat .inc files as Fortran files (default
-# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
-# that for custom extensions you also need to set FILE_PATTERNS otherwise the
-# files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
-
-AUTOLINK_SUPPORT = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = YES
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES (the
-# default) will make doxygen replace the get and set methods by a property in
-# the documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields or simple typedef fields will be shown
-# inline in the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO (the default), structs, classes, and unions are shown on a separate
-# page (for HTML and Man pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can
-# be an expensive process and often the same symbol appear multiple times in
-# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
-# small doxygen will become slower. If the cache is too large, memory is wasted.
-# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
-# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
-# symbols.
-
-LOOKUP_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
-# scope will be included in the documentation.
-
-EXTRACT_PACKAGE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if section-label ... \endif
-# and \cond section-label ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path. Do not use
-# file names with spaces, bibtex cannot handle them.
-
-CITE_BIB_FILES =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = YES
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = @INPUT_PATHS@
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS = *.h
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain images that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command , where
-# is the value of the INPUT_FILTER tag, and is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be ignored.
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = NO
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-# for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If left blank doxygen will
-# generate a default style sheet. Note that it is recommended to use
-# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
-# tag will in the future become obsolete.
-
-HTML_STYLESHEET =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
-# user-defined cascading style sheet that is included after the standard
-# style sheets created by doxygen. Using this option one can overrule
-# certain style aspects. This is preferred over using HTML_STYLESHEET
-# since it does not replace the standard style sheet and is therefore more
-# robust against future updates. Doxygen will copy the style sheet file to
-# the output directory.
-
-HTML_EXTRA_STYLESHEET =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP = NO
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
-# identify the documentation publisher. This should be a reverse domain-name
-# style string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to YES if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider setting DISABLE_INDEX to YES when enabling this option.
-
-GENERATE_TREEVIEW = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want the formulas to look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
-# SVG. The default value is HTML-CSS, which is slower, but has the best
-# compatibility.
-
-MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
-# pieces of code that will be used on startup of the MathJax code.
-
-MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript.
-# There are two flavours of web server based search depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools.
-# See the manual for details.
-
-SERVER_BASED_SEARCH = NO
-
-# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain
-# the search results. Doxygen ships with an example indexer (doxyindexer) and
-# search engine (doxysearch.cgi) which are based on the open source search
-# engine library Xapian. See the manual for configuration details.
-
-EXTERNAL_SEARCH = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-# Doxygen ships with an example search engine (doxysearch) which is based on
-# the open source search engine library Xapian. See the manual for configuration
-# details.
-
-SEARCHENGINE_URL =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-
-SEARCHDATA_FILE = searchdata.xml
-
-# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-
-EXTERNAL_SEARCH_ID =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
-# to a relative location where the documentation can be found.
-# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
-
-EXTRA_SEARCH_MAPPINGS =
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4 will be used.
-
-PAPER_TYPE = a4wide
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
-# or other source files which should be copied to the LaTeX output directory.
-# Note that the files will be copied as-is; there are no commands or markers
-# available.
-
-LATEX_EXTRA_FILES =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML = YES
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
-# that can be used to generate PDF.
-
-GENERATE_DOCBOOK = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it. If left blank docbook will be used as the default path.
-
-DOCBOOK_OUTPUT = docbook
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
-# in the related pages index. If set to NO, only the current project's
-# pages will be listed.
-
-EXTERNAL_PAGES = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# manageable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH = NO
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the number of
-# direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = YES
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
diff --git a/docs/doxygen/doxyxml/__init__.py b/docs/doxygen/doxyxml/__init__.py
index 0690874..381c2c3 100644
--- a/docs/doxygen/doxyxml/__init__.py
+++ b/docs/doxygen/doxyxml/__init__.py
@@ -4,20 +4,8 @@
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
Python interface to contents of doxygen xml documentation.
@@ -64,10 +52,10 @@
u'Outputs the vital aadvark statistics.'
"""
-from __future__ import unicode_literals
from .doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
+
def _test():
import os
this_dir = os.path.dirname(globals()['__file__'])
@@ -79,6 +67,6 @@ def _test():
import doctest
return doctest.testmod()
+
if __name__ == "__main__":
_test()
-
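
For orientation, the file above is the public entry point of the doxyxml package. A minimal usage sketch follows, assuming DoxyIndex is constructed from the directory holding doxygen's XML output and that the wrapped members are reachable via members(); both assumptions are inferred from the doctest above and from the class definitions in doxyindex.py later in this patch, not confirmed by it.

    # Usage sketch only; DoxyIndex(xml_dir) and di.members() are assumptions
    # inferred from the doctest above and from doxyindex.py below.
    import os

    from doxyxml import DoxyIndex, DoxyClass, DoxyFunction

    xml_dir = os.path.join(os.path.dirname(__file__), 'example', 'xml')
    di = DoxyIndex(xml_dir)
    for mem in di.members():
        # brief_description is a property on DoxyClass/DoxyFunction (see
        # doxyindex.py further down in this patch).
        if isinstance(mem, (DoxyClass, DoxyFunction)):
            print(type(mem).__name__, mem.brief_description)
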
diff --git a/docs/doxygen/doxyxml/__init__.pyc b/docs/doxygen/doxyxml/__init__.pyc
deleted file mode 100644
index 8bc3177..0000000
Binary files a/docs/doxygen/doxyxml/__init__.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/base.py b/docs/doxygen/doxyxml/base.py
index 47caa35..fab7c14 100644
--- a/docs/doxygen/doxyxml/base.py
+++ b/docs/doxygen/doxyxml/base.py
@@ -4,20 +4,8 @@
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
A base class is created.
@@ -25,8 +13,6 @@
Classes based upon this are used to make more user-friendly interfaces
to the doxygen xml docs than the generated classes provide.
"""
-from __future__ import print_function
-from __future__ import unicode_literals
import os
import pdb
@@ -97,8 +83,8 @@ def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
- raise Exception(("Did not find a class for object '%s'." \
- % (mem.get_name())))
+ raise Exception(("Did not find a class for object '%s'."
+ % (mem.get_name())))
def convert_mem(self, mem):
try:
diff --git a/docs/doxygen/doxyxml/base.pyc b/docs/doxygen/doxyxml/base.pyc
deleted file mode 100644
index 60ee156..0000000
Binary files a/docs/doxygen/doxyxml/base.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/doxyindex.py b/docs/doxygen/doxyxml/doxyindex.py
index ba0d2b2..1e734cb 100644
--- a/docs/doxygen/doxyxml/doxyindex.py
+++ b/docs/doxygen/doxyxml/doxyindex.py
@@ -4,27 +4,13 @@
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
import os
@@ -32,6 +18,7 @@
from .base import Base
from .text import description
+
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
@@ -60,17 +47,8 @@ def _parse(self):
self._members.append(converted)
-def generate_swig_doc_i(self):
- """
- %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
- Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
- """
- pass
-
-
class DoxyCompMem(Base):
-
kind = None
def __init__(self, *args, **kwargs):
@@ -106,9 +84,11 @@ def set_parameters(self, data):
class DoxyCompound(DoxyCompMem):
pass
+
class DoxyMember(DoxyCompMem):
pass
+
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
@@ -129,9 +109,11 @@ def _parse(self):
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
+
Base.mem_classes.append(DoxyFunction)
@@ -156,9 +138,11 @@ def description(self):
return '\n\n'.join(descriptions)
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
name = property(lambda self: self.data()['declname'])
+
class DoxyParameterItem(DoxyMember):
"""A different representation of a parameter in Doxygen."""
@@ -200,9 +184,11 @@ def _parse(self):
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
+
Base.mem_classes.append(DoxyClass)
@@ -223,7 +209,9 @@ def _parse(self):
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
+ detailed_description = property(
+ lambda self: self.data()['detailed_description'])
+
Base.mem_classes.append(DoxyFile)
@@ -244,6 +232,7 @@ def _parse(self):
return
self.process_memberdefs()
+
Base.mem_classes.append(DoxyNamespace)
@@ -287,6 +276,7 @@ class DoxyFriend(DoxyMember):
kind = 'friend'
+
Base.mem_classes.append(DoxyFriend)
@@ -301,4 +291,5 @@ class DoxyOther(Base):
def can_parse(cls, obj):
return obj.kind in cls.kinds
+
Base.mem_classes.append(DoxyOther)
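
The repeated Base.mem_classes.append(...) calls above are how doxyindex.py registers each wrapper with the dispatcher in base.py: get_cls() walks mem_classes and returns the first class whose can_parse() accepts the raw XML object, raising otherwise. A self-contained sketch of that registry pattern, with purely illustrative class names:

    # Sketch of the registry/dispatch pattern; RawMember and the *Wrapper
    # classes are illustrative, not part of the doxyxml package.
    class RawMember:
        def __init__(self, kind):
            self.kind = kind

    class Wrapper:
        registry = []            # analogous to Base.mem_classes

        @classmethod
        def can_parse(cls, obj):
            return obj.kind == getattr(cls, 'kind', None)

        def __init__(self, raw):
            self.raw = raw

    class FunctionWrapper(Wrapper):
        kind = 'function'

    class FriendWrapper(Wrapper):
        kind = 'friend'

    Wrapper.registry.extend([FunctionWrapper, FriendWrapper])

    def convert(raw):
        # analogous to Base.get_cls() followed by construction
        for cls in Wrapper.registry:
            if cls.can_parse(raw):
                return cls(raw)
        raise Exception("Did not find a class for object '%s'." % raw.kind)

    print(type(convert(RawMember('friend'))).__name__)   # FriendWrapper
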
diff --git a/docs/doxygen/doxyxml/doxyindex.pyc b/docs/doxygen/doxyxml/doxyindex.pyc
deleted file mode 100644
index 145f113..0000000
Binary files a/docs/doxygen/doxyxml/doxyindex.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/generated/__init__.py b/docs/doxygen/doxyxml/generated/__init__.py
index 23095c1..3982397 100644
--- a/docs/doxygen/doxyxml/generated/__init__.py
+++ b/docs/doxygen/doxyxml/generated/__init__.py
@@ -5,4 +5,3 @@
resultant classes are not very friendly to navigate so the rest of the
doxyxml module processes them further.
"""
-from __future__ import unicode_literals
diff --git a/docs/doxygen/doxyxml/generated/__init__.pyc b/docs/doxygen/doxyxml/generated/__init__.pyc
deleted file mode 100644
index daace25..0000000
Binary files a/docs/doxygen/doxyxml/generated/__init__.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/generated/compound.py b/docs/doxygen/doxyxml/generated/compound.py
index acfa0dd..321328b 100644
--- a/docs/doxygen/doxyxml/generated/compound.py
+++ b/docs/doxygen/doxyxml/generated/compound.py
@@ -3,8 +3,6 @@
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
from xml.dom import minidom
@@ -24,13 +22,15 @@ def find(self, details):
return self.compounddef.find(details)
+
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
- supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
+ supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass,
+ innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
def find(self, details):
@@ -50,13 +50,18 @@ def find(self, details):
class listofallmembersTypeSub(supermod.listofallmembersType):
def __init__(self, member=None):
supermod.listofallmembersType.__init__(self, member)
+
+
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
- supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
+ supermod.memberRefType.__init__(
+ self, virt, prot, refid, ambiguityscope, scope, name)
+
+
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
@@ -64,6 +69,8 @@ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=
class compoundRefTypeSub(supermod.compoundRefType):
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.compoundRefType.__init__(self, mixedclass_, content_)
+
+
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
@@ -71,6 +78,8 @@ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=No
class reimplementTypeSub(supermod.reimplementType):
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.reimplementType.__init__(self, mixedclass_, content_)
+
+
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
@@ -78,6 +87,8 @@ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
class incTypeSub(supermod.incType):
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.incType.__init__(self, mixedclass_, content_)
+
+
supermod.incType.subclass = incTypeSub
# end class incTypeSub
@@ -85,23 +96,26 @@ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, conten
class refTypeSub(supermod.refType):
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refType.__init__(self, mixedclass_, content_)
+
+
supermod.refType.subclass = refTypeSub
# end class refTypeSub
-
class refTextTypeSub(supermod.refTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refTextType.__init__(self, mixedclass_, content_)
+
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
-class sectiondefTypeSub(supermod.sectiondefType):
+class sectiondefTypeSub(supermod.sectiondefType):
def __init__(self, kind=None, header='', description=None, memberdef=None):
- supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
+ supermod.sectiondefType.__init__(
+ self, kind, header, description, memberdef)
def find(self, details):
@@ -118,7 +132,10 @@ def find(self, details):
class memberdefTypeSub(supermod.memberdefType):
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
- supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
+ supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_,
+ definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
+
+
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
@@ -126,6 +143,8 @@ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=N
class descriptionTypeSub(supermod.descriptionType):
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
+
+
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
@@ -133,6 +152,8 @@ def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=N
class enumvalueTypeSub(supermod.enumvalueType):
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
+
+
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
@@ -140,13 +161,18 @@ def __init__(self, prot=None, id=None, name='', initializer=None, briefdescripti
class templateparamlistTypeSub(supermod.templateparamlistType):
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
+
+
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
- supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
+ supermod.paramType.__init__(
+ self, type_, declname, defname, array, defval, briefdescription)
+
+
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
@@ -154,6 +180,8 @@ def __init__(self, type_=None, declname='', defname='', array='', defval=None, b
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
+
+
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
@@ -161,6 +189,8 @@ def __init__(self, ref=None, mixedclass_=None, content_=None):
class graphTypeSub(supermod.graphType):
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
+
+
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
@@ -168,6 +198,8 @@ def __init__(self, node=None):
class nodeTypeSub(supermod.nodeType):
def __init__(self, id=None, label='', link=None, childnode=None):
supermod.nodeType.__init__(self, id, label, link, childnode)
+
+
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
@@ -175,6 +207,8 @@ def __init__(self, id=None, label='', link=None, childnode=None):
class childnodeTypeSub(supermod.childnodeType):
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
+
+
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
@@ -182,6 +216,8 @@ def __init__(self, relation=None, refid=None, edgelabel=None):
class linkTypeSub(supermod.linkType):
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
+
+
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
@@ -189,13 +225,18 @@ def __init__(self, refid=None, external=None, valueOf_=''):
class listingTypeSub(supermod.listingType):
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
+
+
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
- supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
+ supermod.codelineType.__init__(
+ self, external, lineno, refkind, refid, highlight)
+
+
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
@@ -203,6 +244,8 @@ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlig
class highlightTypeSub(supermod.highlightType):
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
+
+
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
@@ -210,13 +253,18 @@ def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=No
class referenceTypeSub(supermod.referenceType):
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
+
+
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
- supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
+ supermod.locationType.__init__(
+ self, bodystart, line, bodyend, bodyfile, file)
+
+
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
@@ -224,6 +272,8 @@ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=
class docSect1TypeSub(supermod.docSect1Type):
def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect1Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
@@ -231,6 +281,8 @@ def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixe
class docSect2TypeSub(supermod.docSect2Type):
def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect2Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
@@ -238,6 +290,8 @@ def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixe
class docSect3TypeSub(supermod.docSect3Type):
def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect3Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
@@ -245,6 +299,8 @@ def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixe
class docSect4TypeSub(supermod.docSect4Type):
def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect4Type.__init__(self, mixedclass_, content_)
+
+
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
@@ -252,6 +308,8 @@ def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None
class docInternalTypeSub(supermod.docInternalType):
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
@@ -259,6 +317,8 @@ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
class docInternalS1TypeSub(supermod.docInternalS1Type):
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
@@ -266,6 +326,8 @@ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
class docInternalS2TypeSub(supermod.docInternalS2Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
@@ -273,6 +335,8 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
class docInternalS3TypeSub(supermod.docInternalS3Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
@@ -280,6 +344,8 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
class docInternalS4TypeSub(supermod.docInternalS4Type):
def __init__(self, para=None, mixedclass_=None, content_=None):
supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
+
+
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
@@ -287,6 +353,8 @@ def __init__(self, para=None, mixedclass_=None, content_=None):
class docURLLinkSub(supermod.docURLLink):
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docURLLink.__init__(self, mixedclass_, content_)
+
+
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
@@ -294,6 +362,8 @@ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
class docAnchorTypeSub(supermod.docAnchorType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docAnchorType.__init__(self, mixedclass_, content_)
+
+
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
@@ -301,6 +371,8 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
class docFormulaTypeSub(supermod.docFormulaType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docFormulaType.__init__(self, mixedclass_, content_)
+
+
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
@@ -308,6 +380,8 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
class docIndexEntryTypeSub(supermod.docIndexEntryType):
def __init__(self, primaryie='', secondaryie=''):
supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
+
+
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
@@ -315,6 +389,8 @@ def __init__(self, primaryie='', secondaryie=''):
class docListTypeSub(supermod.docListType):
def __init__(self, listitem=None):
supermod.docListType.__init__(self, listitem)
+
+
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
@@ -322,6 +398,8 @@ def __init__(self, listitem=None):
class docListItemTypeSub(supermod.docListItemType):
def __init__(self, para=None):
supermod.docListItemType.__init__(self, para)
+
+
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
@@ -329,6 +407,8 @@ def __init__(self, para=None):
class docSimpleSectTypeSub(supermod.docSimpleSectType):
def __init__(self, kind=None, title=None, para=None):
supermod.docSimpleSectType.__init__(self, kind, title, para)
+
+
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
@@ -336,6 +416,8 @@ def __init__(self, kind=None, title=None, para=None):
class docVarListEntryTypeSub(supermod.docVarListEntryType):
def __init__(self, term=None):
supermod.docVarListEntryType.__init__(self, term)
+
+
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
@@ -343,6 +425,8 @@ def __init__(self, term=None):
class docRefTextTypeSub(supermod.docRefTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docRefTextType.__init__(self, mixedclass_, content_)
+
+
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
@@ -350,6 +434,8 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl
class docTableTypeSub(supermod.docTableType):
def __init__(self, rows=None, cols=None, row=None, caption=None):
supermod.docTableType.__init__(self, rows, cols, row, caption)
+
+
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
@@ -357,6 +443,8 @@ def __init__(self, rows=None, cols=None, row=None, caption=None):
class docRowTypeSub(supermod.docRowType):
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry)
+
+
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
@@ -364,6 +452,8 @@ def __init__(self, entry=None):
class docEntryTypeSub(supermod.docEntryType):
def __init__(self, thead=None, para=None):
supermod.docEntryType.__init__(self, thead, para)
+
+
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
@@ -371,6 +461,8 @@ def __init__(self, thead=None, para=None):
class docHeadingTypeSub(supermod.docHeadingType):
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docHeadingType.__init__(self, mixedclass_, content_)
+
+
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
@@ -378,6 +470,8 @@ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
class docImageTypeSub(supermod.docImageType):
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docImageType.__init__(self, mixedclass_, content_)
+
+
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
@@ -385,6 +479,8 @@ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='',
class docDotFileTypeSub(supermod.docDotFileType):
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docDotFileType.__init__(self, mixedclass_, content_)
+
+
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
@@ -392,6 +488,8 @@ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
class docTocItemTypeSub(supermod.docTocItemType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docTocItemType.__init__(self, mixedclass_, content_)
+
+
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
@@ -399,6 +497,8 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
class docTocListTypeSub(supermod.docTocListType):
def __init__(self, tocitem=None):
supermod.docTocListType.__init__(self, tocitem)
+
+
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
@@ -406,6 +506,8 @@ def __init__(self, tocitem=None):
class docLanguageTypeSub(supermod.docLanguageType):
def __init__(self, langid=None, para=None):
supermod.docLanguageType.__init__(self, langid, para)
+
+
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
@@ -413,13 +515,18 @@ def __init__(self, langid=None, para=None):
class docParamListTypeSub(supermod.docParamListType):
def __init__(self, kind=None, parameteritem=None):
supermod.docParamListType.__init__(self, kind, parameteritem)
+
+
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
def __init__(self, parameternamelist=None, parameterdescription=None):
- supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
+ supermod.docParamListItem.__init__(
+ self, parameternamelist, parameterdescription)
+
+
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
@@ -427,6 +534,8 @@ def __init__(self, parameternamelist=None, parameterdescription=None):
class docParamNameListSub(supermod.docParamNameList):
def __init__(self, parametername=None):
supermod.docParamNameList.__init__(self, parametername)
+
+
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
@@ -434,6 +543,8 @@ def __init__(self, parametername=None):
class docParamNameSub(supermod.docParamName):
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
supermod.docParamName.__init__(self, mixedclass_, content_)
+
+
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
@@ -441,6 +552,8 @@ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
class docXRefSectTypeSub(supermod.docXRefSectType):
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
+
+
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
@@ -448,6 +561,8 @@ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
class docCopyTypeSub(supermod.docCopyType):
def __init__(self, link=None, para=None, sect1=None, internal=None):
supermod.docCopyType.__init__(self, link, para, sect1, internal)
+
+
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
@@ -455,9 +570,12 @@ def __init__(self, link=None, para=None, sect1=None, internal=None):
class docCharTypeSub(supermod.docCharType):
def __init__(self, char=None, valueOf_=''):
supermod.docCharType.__init__(self, char)
+
+
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
+
class docParaTypeSub(supermod.docParaType):
def __init__(self, char=None, valueOf_=''):
supermod.docParaType.__init__(self, char)
@@ -471,7 +589,7 @@ def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == "ref":
@@ -494,12 +612,9 @@ def buildChildren(self, child_, nodeName_):
# end class docParaTypeSub
-
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
-
-
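
compound.py relies on the generateDS "subclass hook": each hand-extended *Sub class is assigned to supermod.<Type>.subclass, so the generated factory() methods in compoundsuper.py return the extended class, and parse() at the end of the file builds the whole tree from minidom through those factories. A standalone sketch of the idiom, with illustrative names:

    # Illustrative sketch of the generateDS subclass hook; not library code.
    class GeneratedType:
        subclass = None          # compound.py assigns its extended class here

        @staticmethod
        def factory(*args, **kwargs):
            if GeneratedType.subclass:
                return GeneratedType.subclass(*args, **kwargs)
            return GeneratedType(*args, **kwargs)

    class GeneratedTypeSub(GeneratedType):
        def find(self, details):
            return None          # extra behaviour added by the hand-written layer

    GeneratedType.subclass = GeneratedTypeSub
    obj = GeneratedType.factory()
    print(type(obj).__name__)    # GeneratedTypeSub
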
diff --git a/docs/doxygen/doxyxml/generated/compound.pyc b/docs/doxygen/doxyxml/generated/compound.pyc
deleted file mode 100644
index 884e0ec..0000000
Binary files a/docs/doxygen/doxyxml/generated/compound.pyc and /dev/null differ
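
The compoundsuper.py changes below are almost entirely autopep8 reflow plus the removal of the six dependency; behaviour is unchanged. The quote_xml()/quote_attrib() helpers touched there escape XML special characters before writing, roughly as in this standalone sketch (the real quote_attrib() additionally wraps the value in quotes):

    # Standalone sketch of the escaping done by quote_xml() below; the name
    # escape_xml_text is illustrative and this is not the library code itself.
    def escape_xml_text(value):
        s1 = value if isinstance(value, str) else '%s' % value
        s1 = s1.replace('&', '&amp;')   # must come first so it is not re-escaped
        s1 = s1.replace('<', '&lt;')
        s1 = s1.replace('>', '&gt;')
        return s1

    assert escape_xml_text('a < b & c') == 'a &lt; b &amp; c'
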
diff --git a/docs/doxygen/doxyxml/generated/compoundsuper.py b/docs/doxygen/doxyxml/generated/compoundsuper.py
index 6e984e1..40f548a 100644
--- a/docs/doxygen/doxyxml/generated/compoundsuper.py
+++ b/docs/doxygen/doxyxml/generated/compoundsuper.py
@@ -4,17 +4,12 @@
# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
#
-from __future__ import print_function
-from __future__ import unicode_literals
import sys
from xml.dom import minidom
from xml.dom import Node
-import six
-
-
#
# User methods
#
@@ -29,12 +24,16 @@
class GeneratedsSuper(object):
def format_string(self, input_data, input_name=''):
return input_data
+
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
+
def format_float(self, input_data, input_name=''):
return '%f' % input_data
+
def format_double(self, input_data, input_name=''):
return '%e' % input_data
+
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
@@ -46,9 +45,9 @@ def format_boolean(self, input_data, input_name=''):
## from IPython.Shell import IPShellEmbed
## args = ''
-## ipshell = IPShellEmbed(args,
+# ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
+# exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
@@ -64,20 +63,23 @@ def format_boolean(self, input_data, input_name=''):
# Support/utility functions.
#
+
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
+
def quote_xml(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
     s1 = s1.replace('&', '&amp;')
     s1 = s1.replace('<', '&lt;')
     s1 = s1.replace('>', '&gt;')
return s1
+
def quote_attrib(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
     s1 = s1.replace('&', '&amp;')
     s1 = s1.replace('<', '&lt;')
@@ -91,6 +93,7 @@ def quote_attrib(inStr):
s1 = '"%s"' % s1
return s1
+
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
@@ -122,26 +125,33 @@ class MixedContainer(object):
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
+
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
+
def getCategory(self):
return self.category
+
def getContenttype(self, content_type):
return self.content_type
+
def getValue(self):
return self.value
+
def getName(self):
return self.name
+
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
- self.value.export(outfile, level, namespace,name)
+ self.value.export(outfile, level, namespace, name)
+
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
             outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
@@ -153,19 +163,20 @@ def exportSimple(self, outfile, level, name):
             outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
             outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s",\n' % \
- (self.category, self.content_type, self.name,))
+ outfile.write('MixedContainer(%d, %d, "%s",\n' %
+ (self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
@@ -176,6 +187,7 @@ def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
+
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
@@ -191,9 +203,11 @@ def get_container(self): return self.container
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, version=None, compounddef=None):
self.version = version
self.compounddef = compounddef
+
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
@@ -204,6 +218,7 @@ def get_compounddef(self): return self.compounddef
def set_compounddef(self, compounddef): self.compounddef = compounddef
def get_version(self): return self.version
def set_version(self, version): self.version = version
+
def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -215,27 +230,34 @@ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacede
             outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
outfile.write(' version=%s' % (quote_attrib(self.version), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
if self.compounddef:
- self.compounddef.export(outfile, level, namespace_, name_='compounddef')
+ self.compounddef.export(
+ outfile, level, namespace_, name_='compounddef')
+
def hasContent_(self):
if (
self.compounddef is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='DoxygenType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.version is not None:
showIndent(outfile, level)
outfile.write('version = "%s",\n' % (self.version,))
+
def exportLiteralChildren(self, outfile, level, name_):
if self.compounddef:
showIndent(outfile, level)
@@ -243,18 +265,21 @@ def exportLiteralChildren(self, outfile, level, name_):
self.compounddef.exportLiteral(outfile, level, name_='compounddef')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compounddef':
+ nodeName_ == 'compounddef':
obj_ = compounddefType.factory()
obj_.build(child_)
self.set_compounddef(obj_)
@@ -264,6 +289,7 @@ def buildChildren(self, child_, nodeName_):
class compounddefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
self.kind = kind
self.prot = prot
@@ -324,6 +350,7 @@ def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None,
self.programlisting = programlisting
self.location = location
self.listofallmembers = listofallmembers
+
def factory(*args_, **kwargs_):
if compounddefType.subclass:
return compounddefType.subclass(*args_, **kwargs_)
@@ -335,13 +362,23 @@ def set_compoundname(self, compoundname): self.compoundname = compoundname
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_basecompoundref(self): return self.basecompoundref
- def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref
+ def set_basecompoundref(
+ self, basecompoundref): self.basecompoundref = basecompoundref
+
def add_basecompoundref(self, value): self.basecompoundref.append(value)
- def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value
+ def insert_basecompoundref(
+ self, index, value): self.basecompoundref[index] = value
+
def get_derivedcompoundref(self): return self.derivedcompoundref
- def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
- def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value)
- def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value
+
+ def set_derivedcompoundref(
+ self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
+
+ def add_derivedcompoundref(
+ self, value): self.derivedcompoundref.append(value)
+ def insert_derivedcompoundref(
+ self, index, value): self.derivedcompoundref[index] = value
+
def get_includes(self): return self.includes
def set_includes(self, includes): self.includes = includes
def add_includes(self, value): self.includes.append(value)
@@ -353,7 +390,9 @@ def insert_includedby(self, index, value): self.includedby[index] = value
def get_incdepgraph(self): return self.incdepgraph
def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph
def get_invincdepgraph(self): return self.invincdepgraph
- def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph
+ def set_invincdepgraph(
+ self, invincdepgraph): self.invincdepgraph = invincdepgraph
+
def get_innerdir(self): return self.innerdir
def set_innerdir(self, innerdir): self.innerdir = innerdir
def add_innerdir(self, value): self.innerdir.append(value)
@@ -367,9 +406,13 @@ def set_innerclass(self, innerclass): self.innerclass = innerclass
def add_innerclass(self, value): self.innerclass.append(value)
def insert_innerclass(self, index, value): self.innerclass[index] = value
def get_innernamespace(self): return self.innernamespace
- def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace
+ def set_innernamespace(
+ self, innernamespace): self.innernamespace = innernamespace
+
def add_innernamespace(self, value): self.innernamespace.append(value)
- def insert_innernamespace(self, index, value): self.innernamespace[index] = value
+ def insert_innernamespace(
+ self, index, value): self.innernamespace[index] = value
+
def get_innerpage(self): return self.innerpage
def set_innerpage(self, innerpage): self.innerpage = innerpage
def add_innerpage(self, value): self.innerpage.append(value)
@@ -379,35 +422,51 @@ def set_innergroup(self, innergroup): self.innergroup = innergroup
def add_innergroup(self, value): self.innergroup.append(value)
def insert_innergroup(self, index, value): self.innergroup[index] = value
def get_templateparamlist(self): return self.templateparamlist
- def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def set_templateparamlist(
+ self, templateparamlist): self.templateparamlist = templateparamlist
+
def get_sectiondef(self): return self.sectiondef
def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef
def add_sectiondef(self, value): self.sectiondef.append(value)
def insert_sectiondef(self, index, value): self.sectiondef[index] = value
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def set_detaileddescription(
+ self, detaileddescription): self.detaileddescription = detaileddescription
+
def get_inheritancegraph(self): return self.inheritancegraph
- def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph
+ def set_inheritancegraph(
+ self, inheritancegraph): self.inheritancegraph = inheritancegraph
+
def get_collaborationgraph(self): return self.collaborationgraph
- def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph
+ def set_collaborationgraph(
+ self, collaborationgraph): self.collaborationgraph = collaborationgraph
+
def get_programlisting(self): return self.programlisting
- def set_programlisting(self, programlisting): self.programlisting = programlisting
+ def set_programlisting(
+ self, programlisting): self.programlisting = programlisting
+
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_listofallmembers(self): return self.listofallmembers
- def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers
+ def set_listofallmembers(
+ self, listofallmembers): self.listofallmembers = listofallmembers
+
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='compounddefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='compounddefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -415,32 +474,41 @@ def export(self, outfile, level, namespace_='', name_='compounddefType', namespa
             outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'):
if self.compoundname is not None:
showIndent(outfile, level)
-            outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
+            outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(
+ quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
if self.title is not None:
showIndent(outfile, level)
-            outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
+            outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(
+ quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
for basecompoundref_ in self.basecompoundref:
- basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref')
+ basecompoundref_.export(
+ outfile, level, namespace_, name_='basecompoundref')
for derivedcompoundref_ in self.derivedcompoundref:
- derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref')
+ derivedcompoundref_.export(
+ outfile, level, namespace_, name_='derivedcompoundref')
for includes_ in self.includes:
includes_.export(outfile, level, namespace_, name_='includes')
for includedby_ in self.includedby:
includedby_.export(outfile, level, namespace_, name_='includedby')
if self.incdepgraph:
- self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph')
+ self.incdepgraph.export(
+ outfile, level, namespace_, name_='incdepgraph')
if self.invincdepgraph:
- self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph')
+ self.invincdepgraph.export(
+ outfile, level, namespace_, name_='invincdepgraph')
for innerdir_ in self.innerdir:
innerdir_.export(outfile, level, namespace_, name_='innerdir')
for innerfile_ in self.innerfile:
@@ -448,29 +516,38 @@ def exportChildren(self, outfile, level, namespace_='', name_='compounddefType')
for innerclass_ in self.innerclass:
innerclass_.export(outfile, level, namespace_, name_='innerclass')
for innernamespace_ in self.innernamespace:
- innernamespace_.export(outfile, level, namespace_, name_='innernamespace')
+ innernamespace_.export(
+ outfile, level, namespace_, name_='innernamespace')
for innerpage_ in self.innerpage:
innerpage_.export(outfile, level, namespace_, name_='innerpage')
for innergroup_ in self.innergroup:
innergroup_.export(outfile, level, namespace_, name_='innergroup')
if self.templateparamlist:
- self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ self.templateparamlist.export(
+ outfile, level, namespace_, name_='templateparamlist')
for sectiondef_ in self.sectiondef:
sectiondef_.export(outfile, level, namespace_, name_='sectiondef')
if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ self.briefdescription.export(
+ outfile, level, namespace_, name_='briefdescription')
if self.detaileddescription:
- self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ self.detaileddescription.export(
+ outfile, level, namespace_, name_='detaileddescription')
if self.inheritancegraph:
- self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph')
+ self.inheritancegraph.export(
+ outfile, level, namespace_, name_='inheritancegraph')
if self.collaborationgraph:
- self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph')
+ self.collaborationgraph.export(
+ outfile, level, namespace_, name_='collaborationgraph')
if self.programlisting:
- self.programlisting.export(outfile, level, namespace_, name_='programlisting')
+ self.programlisting.export(
+ outfile, level, namespace_, name_='programlisting')
if self.location:
self.location.export(outfile, level, namespace_, name_='location')
if self.listofallmembers:
- self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers')
+ self.listofallmembers.export(
+ outfile, level, namespace_, name_='listofallmembers')
+
def hasContent_(self):
if (
self.compoundname is not None or
@@ -496,15 +573,17 @@ def hasContent_(self):
self.programlisting is not None or
self.location is not None or
self.listofallmembers is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='compounddefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
@@ -515,9 +594,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding))
+ outfile.write('compoundname=%s,\n' % quote_python(
+ self.compoundname).encode(ExternalEncoding))
if self.title:
showIndent(outfile, level)
outfile.write('title=model_.xsd_string(\n')
@@ -530,7 +611,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for basecompoundref in self.basecompoundref:
showIndent(outfile, level)
outfile.write('model_.basecompoundref(\n')
- basecompoundref.exportLiteral(outfile, level, name_='basecompoundref')
+ basecompoundref.exportLiteral(
+ outfile, level, name_='basecompoundref')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -542,7 +624,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for derivedcompoundref in self.derivedcompoundref:
showIndent(outfile, level)
outfile.write('model_.derivedcompoundref(\n')
- derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref')
+ derivedcompoundref.exportLiteral(
+ outfile, level, name_='derivedcompoundref')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -581,7 +664,8 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.invincdepgraph:
showIndent(outfile, level)
outfile.write('invincdepgraph=model_.graphType(\n')
- self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph')
+ self.invincdepgraph.exportLiteral(
+ outfile, level, name_='invincdepgraph')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
@@ -626,7 +710,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for innernamespace in self.innernamespace:
showIndent(outfile, level)
outfile.write('model_.innernamespace(\n')
- innernamespace.exportLiteral(outfile, level, name_='innernamespace')
+ innernamespace.exportLiteral(
+ outfile, level, name_='innernamespace')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -659,7 +744,8 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.templateparamlist:
showIndent(outfile, level)
outfile.write('templateparamlist=model_.templateparamlistType(\n')
- self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ self.templateparamlist.exportLiteral(
+ outfile, level, name_='templateparamlist')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
@@ -677,31 +763,36 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ self.briefdescription.exportLiteral(
+ outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.detaileddescription:
showIndent(outfile, level)
outfile.write('detaileddescription=model_.descriptionType(\n')
- self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ self.detaileddescription.exportLiteral(
+ outfile, level, name_='detaileddescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.inheritancegraph:
showIndent(outfile, level)
outfile.write('inheritancegraph=model_.graphType(\n')
- self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph')
+ self.inheritancegraph.exportLiteral(
+ outfile, level, name_='inheritancegraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.collaborationgraph:
showIndent(outfile, level)
outfile.write('collaborationgraph=model_.graphType(\n')
- self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph')
+ self.collaborationgraph.exportLiteral(
+ outfile, level, name_='collaborationgraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.programlisting:
showIndent(outfile, level)
outfile.write('programlisting=model_.listingType(\n')
- self.programlisting.exportLiteral(outfile, level, name_='programlisting')
+ self.programlisting.exportLiteral(
+ outfile, level, name_='programlisting')
showIndent(outfile, level)
outfile.write('),\n')
if self.location:
@@ -713,15 +804,18 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.listofallmembers:
showIndent(outfile, level)
outfile.write('listofallmembers=model_.listofallmembersType(\n')
- self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers')
+ self.listofallmembers.exportLiteral(
+ outfile, level, name_='listofallmembers')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
@@ -729,120 +823,121 @@ def buildAttributes(self, attrs):
self.prot = attrs.get('prot').value
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compoundname':
+ nodeName_ == 'compoundname':
compoundname_ = ''
for text__content_ in child_.childNodes:
compoundname_ += text__content_.nodeValue
self.compoundname = compoundname_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_title(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'basecompoundref':
+ nodeName_ == 'basecompoundref':
obj_ = compoundRefType.factory()
obj_.build(child_)
self.basecompoundref.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'derivedcompoundref':
+ nodeName_ == 'derivedcompoundref':
obj_ = compoundRefType.factory()
obj_.build(child_)
self.derivedcompoundref.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'includes':
+ nodeName_ == 'includes':
obj_ = incType.factory()
obj_.build(child_)
self.includes.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'includedby':
+ nodeName_ == 'includedby':
obj_ = incType.factory()
obj_.build(child_)
self.includedby.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'incdepgraph':
+ nodeName_ == 'incdepgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_incdepgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'invincdepgraph':
+ nodeName_ == 'invincdepgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_invincdepgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerdir':
+ nodeName_ == 'innerdir':
obj_ = refType.factory()
obj_.build(child_)
self.innerdir.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerfile':
+ nodeName_ == 'innerfile':
obj_ = refType.factory()
obj_.build(child_)
self.innerfile.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerclass':
+ nodeName_ == 'innerclass':
obj_ = refType.factory()
obj_.build(child_)
self.innerclass.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innernamespace':
+ nodeName_ == 'innernamespace':
obj_ = refType.factory()
obj_.build(child_)
self.innernamespace.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerpage':
+ nodeName_ == 'innerpage':
obj_ = refType.factory()
obj_.build(child_)
self.innerpage.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innergroup':
+ nodeName_ == 'innergroup':
obj_ = refType.factory()
obj_.build(child_)
self.innergroup.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'templateparamlist':
+ nodeName_ == 'templateparamlist':
obj_ = templateparamlistType.factory()
obj_.build(child_)
self.set_templateparamlist(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sectiondef':
+ nodeName_ == 'sectiondef':
obj_ = sectiondefType.factory()
obj_.build(child_)
self.sectiondef.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
+ nodeName_ == 'detaileddescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'inheritancegraph':
+ nodeName_ == 'inheritancegraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_inheritancegraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'collaborationgraph':
+ nodeName_ == 'collaborationgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_collaborationgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'programlisting':
+ nodeName_ == 'programlisting':
obj_ = listingType.factory()
obj_.build(child_)
self.set_programlisting(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'location':
+ nodeName_ == 'location':
obj_ = locationType.factory()
obj_.build(child_)
self.set_location(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'listofallmembers':
+ nodeName_ == 'listofallmembers':
obj_ = listofallmembersType.factory()
obj_.build(child_)
self.set_listofallmembers(obj_)
@@ -852,11 +947,13 @@ def buildChildren(self, child_, nodeName_):
class listofallmembersType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, member=None):
if member is None:
self.member = []
else:
self.member = member
+
def factory(*args_, **kwargs_):
if listofallmembersType.subclass:
return listofallmembersType.subclass(*args_, **kwargs_)
@@ -867,10 +964,12 @@ def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
+
def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='listofallmembersType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -878,25 +977,31 @@ def export(self, outfile, level, namespace_='', name_='listofallmembersType', na
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
+
def hasContent_(self):
if (
self.member is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='listofallmembersType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('member=[\n')
@@ -910,17 +1015,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'member':
+ nodeName_ == 'member':
obj_ = memberRefType.factory()
obj_.build(child_)
self.member.append(obj_)
@@ -930,6 +1038,7 @@ def buildChildren(self, child_, nodeName_):
class memberRefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
self.virt = virt
self.prot = prot
@@ -937,6 +1046,7 @@ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=
self.ambiguityscope = ambiguityscope
self.scope = scope
self.name = name
+
def factory(*args_, **kwargs_):
if memberRefType.subclass:
return memberRefType.subclass(*args_, **kwargs_)
@@ -954,11 +1064,15 @@ def set_prot(self, prot): self.prot = prot
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_ambiguityscope(self): return self.ambiguityscope
- def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
+
+ def set_ambiguityscope(
+ self, ambiguityscope): self.ambiguityscope = ambiguityscope
+
def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='memberRefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -966,35 +1080,44 @@ def export(self, outfile, level, namespace_='', name_='memberRefType', namespace
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.ambiguityscope is not None:
- outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
+ outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(
+ self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
if self.scope is not None:
showIndent(outfile, level)
- outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
+ outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(
+ quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
if self.name is not None:
showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+ quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+
def hasContent_(self):
if (
self.scope is not None or
self.name is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='memberRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.virt is not None:
showIndent(outfile, level)
@@ -1008,17 +1131,22 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.ambiguityscope is not None:
showIndent(outfile, level)
outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
+ outfile.write('scope=%s,\n' % quote_python(
+ self.scope).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('virt'):
self.virt = attrs.get('virt').value
@@ -1028,15 +1156,16 @@ def buildAttributes(self, attrs):
self.refid = attrs.get('refid').value
if attrs.get('ambiguityscope'):
self.ambiguityscope = attrs.get('ambiguityscope').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'scope':
+ nodeName_ == 'scope':
scope_ = ''
for text__content_ in child_.childNodes:
scope_ += text__content_.nodeValue
self.scope = scope_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
@@ -1047,8 +1176,10 @@ def buildChildren(self, child_, nodeName_):
class scope(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if scope.subclass:
return scope.subclass(*args_, **kwargs_)
@@ -1057,6 +1188,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1068,33 +1200,40 @@ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_='')
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='scope'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='scope'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1102,21 +1241,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class scope
class name(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if name.subclass:
return name.subclass(*args_, **kwargs_)
@@ -1125,6 +1268,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1136,33 +1280,40 @@ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='name'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='name'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='name'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1170,19 +1321,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class name
class compoundRefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.virt = virt
self.prot = prot
@@ -1195,6 +1349,7 @@ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=No
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if compoundRefType.subclass:
return compoundRefType.subclass(*args_, **kwargs_)
@@ -1209,40 +1364,48 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='compoundRefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='compoundRefType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'):
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='compoundRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.virt is not None:
showIndent(outfile, level)
@@ -1253,9 +1416,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1263,6 +1428,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('virt'):
self.virt = attrs.get('virt').value
@@ -1270,21 +1436,23 @@ def buildAttributes(self, attrs):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class compoundRefType
class reimplementType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
if mixedclass_ is None:
@@ -1295,6 +1463,7 @@ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if reimplementType.subclass:
return reimplementType.subclass(*args_, **kwargs_)
@@ -1305,43 +1474,53 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='reimplementType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='reimplementType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1349,24 +1528,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class reimplementType
class incType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.local = local
self.refid = refid
@@ -1378,6 +1560,7 @@ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, conten
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if incType.subclass:
return incType.subclass(*args_, **kwargs_)
@@ -1390,6 +1573,7 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1397,31 +1581,37 @@ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_='
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
if self.local is not None:
outfile.write(' local=%s' % (quote_attrib(self.local), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='incType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='incType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.local is not None:
showIndent(outfile, level)
@@ -1429,9 +1619,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1439,26 +1631,29 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('local'):
self.local = attrs.get('local').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class incType
class refType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.prot = prot
self.refid = refid
@@ -1470,6 +1665,7 @@ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if refType.subclass:
return refType.subclass(*args_, **kwargs_)
@@ -1482,6 +1678,7 @@ def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1489,31 +1686,37 @@ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_='
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='refType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='refType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.prot is not None:
showIndent(outfile, level)
@@ -1521,9 +1724,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1531,26 +1736,29 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class refType
class refTextType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
self.kindref = kindref
@@ -1563,6 +1771,7 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if refTextType.subclass:
return refTextType.subclass(*args_, **kwargs_)
@@ -1577,6 +1786,7 @@ def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -1584,33 +1794,40 @@ def export(self, outfile, level, namespace_='', name_='refTextType', namespacede
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.kindref is not None:
outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(
+ self.external).encode(ExternalEncoding), input_name='external'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='refTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
@@ -1621,9 +1838,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -1631,6 +1850,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
@@ -1638,21 +1858,23 @@ def buildAttributes(self, attrs):
self.kindref = attrs.get('kindref').value
if attrs.get('external'):
self.external = attrs.get('external').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class refTextType
class sectiondefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, header=None, description=None, memberdef=None):
self.kind = kind
self.header = header
@@ -1661,6 +1883,7 @@ def __init__(self, kind=None, header=None, description=None, memberdef=None):
self.memberdef = []
else:
self.memberdef = memberdef
+
def factory(*args_, **kwargs_):
if sectiondefType.subclass:
return sectiondefType.subclass(*args_, **kwargs_)
@@ -1677,10 +1900,12 @@ def add_memberdef(self, value): self.memberdef.append(value)
def insert_memberdef(self, index, value): self.memberdef[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
+
def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='sectiondefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -1688,38 +1913,47 @@ def export(self, outfile, level, namespace_='', name_='sectiondefType', namespac
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
if self.header is not None:
showIndent(outfile, level)
- outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
+ outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(
+ quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
if self.description:
- self.description.export(outfile, level, namespace_, name_='description')
+ self.description.export(
+ outfile, level, namespace_, name_='description')
for memberdef_ in self.memberdef:
memberdef_.export(outfile, level, namespace_, name_='memberdef')
+
def hasContent_(self):
if (
self.header is not None or
self.description is not None or
self.memberdef is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='sectiondefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
+ outfile.write('header=%s,\n' % quote_python(
+ self.header).encode(ExternalEncoding))
if self.description:
showIndent(outfile, level)
outfile.write('description=model_.descriptionType(\n')
@@ -1738,29 +1972,32 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'header':
+ nodeName_ == 'header':
header_ = ''
for text__content_ in child_.childNodes:
header_ += text__content_.nodeValue
self.header = header_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'description':
+ nodeName_ == 'description':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_description(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'memberdef':
+ nodeName_ == 'memberdef':
obj_ = memberdefType.factory()
obj_.build(child_)
self.memberdef.append(obj_)
@@ -1770,6 +2007,7 @@ def buildChildren(self, child_, nodeName_):
class memberdefType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
self.initonly = initonly
self.kind = kind
@@ -1830,6 +2068,7 @@ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=
self.referencedby = []
else:
self.referencedby = referencedby
+
def factory(*args_, **kwargs_):
if memberdefType.subclass:
return memberdefType.subclass(*args_, **kwargs_)
@@ -1837,7 +2076,9 @@ def factory(*args_, **kwargs_):
return memberdefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_templateparamlist(self): return self.templateparamlist
- def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def set_templateparamlist(
+ self, templateparamlist): self.templateparamlist = templateparamlist
+
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_definition(self): return self.definition
@@ -1855,11 +2096,17 @@ def set_bitfield(self, bitfield): self.bitfield = bitfield
def get_reimplements(self): return self.reimplements
def set_reimplements(self, reimplements): self.reimplements = reimplements
def add_reimplements(self, value): self.reimplements.append(value)
- def insert_reimplements(self, index, value): self.reimplements[index] = value
+ def insert_reimplements(
+ self, index, value): self.reimplements[index] = value
+
def get_reimplementedby(self): return self.reimplementedby
- def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby
+ def set_reimplementedby(
+ self, reimplementedby): self.reimplementedby = reimplementedby
+
def add_reimplementedby(self, value): self.reimplementedby.append(value)
- def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value
+ def insert_reimplementedby(
+ self, index, value): self.reimplementedby[index] = value
+
def get_param(self): return self.param
def set_param(self, param): self.param = param
def add_param(self, value): self.param.append(value)
@@ -1873,11 +2120,17 @@ def set_initializer(self, initializer): self.initializer = initializer
def get_exceptions(self): return self.exceptions
def set_exceptions(self, exceptions): self.exceptions = exceptions
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def set_detaileddescription(
+ self, detaileddescription): self.detaileddescription = detaileddescription
+
def get_inbodydescription(self): return self.inbodydescription
- def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription
+ def set_inbodydescription(
+ self, inbodydescription): self.inbodydescription = inbodydescription
+
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_references(self): return self.references
@@ -1887,7 +2140,9 @@ def insert_references(self, index, value): self.references[index] = value
def get_referencedby(self): return self.referencedby
def set_referencedby(self, referencedby): self.referencedby = referencedby
def add_referencedby(self, value): self.referencedby.append(value)
- def insert_referencedby(self, index, value): self.referencedby[index] = value
+ def insert_referencedby(
+ self, index, value): self.referencedby[index] = value
+
def get_initonly(self): return self.initonly
def set_initonly(self, initonly): self.initonly = initonly
def get_kind(self): return self.kind
@@ -1930,10 +2185,12 @@ def get_settable(self): return self.settable
def set_settable(self, settable): self.settable = settable
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='memberdefType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='memberdefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -1941,6 +2198,7 @@ def export(self, outfile, level, namespace_='', name_='memberdefType', namespace
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'):
if self.initonly is not None:
outfile.write(' initonly=%s' % (quote_attrib(self.initonly), ))
@@ -1983,54 +2241,73 @@ def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType')
if self.settable is not None:
outfile.write(' settable=%s' % (quote_attrib(self.settable), ))
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'):
if self.templateparamlist:
- self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ self.templateparamlist.export(
+ outfile, level, namespace_, name_='templateparamlist')
if self.type_:
self.type_.export(outfile, level, namespace_, name_='type')
if self.definition is not None:
showIndent(outfile, level)
- outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
+ outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(
+ quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
if self.argsstring is not None:
showIndent(outfile, level)
- outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
+ outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(
+ quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
if self.name is not None:
showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+ quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
if self.read is not None:
showIndent(outfile, level)
- outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
+ outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(
+ quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
if self.write is not None:
showIndent(outfile, level)
- outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
+ outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(
+ quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
if self.bitfield is not None:
showIndent(outfile, level)
- outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
+ outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(
+ quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
for reimplements_ in self.reimplements:
- reimplements_.export(outfile, level, namespace_, name_='reimplements')
+ reimplements_.export(
+ outfile, level, namespace_, name_='reimplements')
for reimplementedby_ in self.reimplementedby:
- reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby')
+ reimplementedby_.export(
+ outfile, level, namespace_, name_='reimplementedby')
for param_ in self.param:
param_.export(outfile, level, namespace_, name_='param')
for enumvalue_ in self.enumvalue:
enumvalue_.export(outfile, level, namespace_, name_='enumvalue')
if self.initializer:
- self.initializer.export(outfile, level, namespace_, name_='initializer')
+ self.initializer.export(
+ outfile, level, namespace_, name_='initializer')
if self.exceptions:
- self.exceptions.export(outfile, level, namespace_, name_='exceptions')
+ self.exceptions.export(
+ outfile, level, namespace_, name_='exceptions')
if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ self.briefdescription.export(
+ outfile, level, namespace_, name_='briefdescription')
if self.detaileddescription:
- self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ self.detaileddescription.export(
+ outfile, level, namespace_, name_='detaileddescription')
if self.inbodydescription:
- self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription')
+ self.inbodydescription.export(
+ outfile, level, namespace_, name_='inbodydescription')
if self.location:
- self.location.export(outfile, level, namespace_, name_='location', )
+ self.location.export(
+ outfile, level, namespace_, name_='location', )
for references_ in self.references:
references_.export(outfile, level, namespace_, name_='references')
for referencedby_ in self.referencedby:
- referencedby_.export(outfile, level, namespace_, name_='referencedby')
+ referencedby_.export(
+ outfile, level, namespace_, name_='referencedby')
+
def hasContent_(self):
if (
self.templateparamlist is not None or
@@ -2053,15 +2330,17 @@ def hasContent_(self):
self.location is not None or
self.references is not None or
self.referencedby is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='memberdefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.initonly is not None:
showIndent(outfile, level)
@@ -2126,11 +2405,13 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
if self.templateparamlist:
showIndent(outfile, level)
outfile.write('templateparamlist=model_.templateparamlistType(\n')
- self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ self.templateparamlist.exportLiteral(
+ outfile, level, name_='templateparamlist')
showIndent(outfile, level)
outfile.write('),\n')
if self.type_:
@@ -2140,17 +2421,23 @@ def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
- outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding))
+ outfile.write('definition=%s,\n' % quote_python(
+ self.definition).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding))
+ outfile.write('argsstring=%s,\n' % quote_python(
+ self.argsstring).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding))
+ outfile.write('read=%s,\n' % quote_python(
+ self.read).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding))
+ outfile.write('write=%s,\n' % quote_python(
+ self.write).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding))
+ outfile.write('bitfield=%s,\n' % quote_python(
+ self.bitfield).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('reimplements=[\n')
level += 1
@@ -2169,7 +2456,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for reimplementedby in self.reimplementedby:
showIndent(outfile, level)
outfile.write('model_.reimplementedby(\n')
- reimplementedby.exportLiteral(outfile, level, name_='reimplementedby')
+ reimplementedby.exportLiteral(
+ outfile, level, name_='reimplementedby')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -2214,19 +2502,22 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ self.briefdescription.exportLiteral(
+ outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.detaileddescription:
showIndent(outfile, level)
outfile.write('detaileddescription=model_.descriptionType(\n')
- self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ self.detaileddescription.exportLiteral(
+ outfile, level, name_='detaileddescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.inbodydescription:
showIndent(outfile, level)
outfile.write('inbodydescription=model_.descriptionType(\n')
- self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription')
+ self.inbodydescription.exportLiteral(
+ outfile, level, name_='inbodydescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.location:
@@ -2259,12 +2550,14 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('initonly'):
self.initonly = attrs.get('initonly').value
@@ -2308,110 +2601,111 @@ def buildAttributes(self, attrs):
self.settable = attrs.get('settable').value
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'templateparamlist':
+ nodeName_ == 'templateparamlist':
obj_ = templateparamlistType.factory()
obj_.build(child_)
self.set_templateparamlist(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'type':
+ nodeName_ == 'type':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_type(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'definition':
+ nodeName_ == 'definition':
definition_ = ''
for text__content_ in child_.childNodes:
definition_ += text__content_.nodeValue
self.definition = definition_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'argsstring':
+ nodeName_ == 'argsstring':
argsstring_ = ''
for text__content_ in child_.childNodes:
argsstring_ += text__content_.nodeValue
self.argsstring = argsstring_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'read':
+ nodeName_ == 'read':
read_ = ''
for text__content_ in child_.childNodes:
read_ += text__content_.nodeValue
self.read = read_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'write':
+ nodeName_ == 'write':
write_ = ''
for text__content_ in child_.childNodes:
write_ += text__content_.nodeValue
self.write = write_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'bitfield':
+ nodeName_ == 'bitfield':
bitfield_ = ''
for text__content_ in child_.childNodes:
bitfield_ += text__content_.nodeValue
self.bitfield = bitfield_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'reimplements':
+ nodeName_ == 'reimplements':
obj_ = reimplementType.factory()
obj_.build(child_)
self.reimplements.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'reimplementedby':
+ nodeName_ == 'reimplementedby':
obj_ = reimplementType.factory()
obj_.build(child_)
self.reimplementedby.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'param':
+ nodeName_ == 'param':
obj_ = paramType.factory()
obj_.build(child_)
self.param.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'enumvalue':
+ nodeName_ == 'enumvalue':
obj_ = enumvalueType.factory()
obj_.build(child_)
self.enumvalue.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'initializer':
+ nodeName_ == 'initializer':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_initializer(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'exceptions':
+ nodeName_ == 'exceptions':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_exceptions(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
+ nodeName_ == 'detaileddescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'inbodydescription':
+ nodeName_ == 'inbodydescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_inbodydescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'location':
+ nodeName_ == 'location':
obj_ = locationType.factory()
obj_.build(child_)
self.set_location(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'references':
+ nodeName_ == 'references':
obj_ = referenceType.factory()
obj_.build(child_)
self.references.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'referencedby':
+ nodeName_ == 'referencedby':
obj_ = referenceType.factory()
obj_.build(child_)
self.referencedby.append(obj_)
@@ -2421,8 +2715,10 @@ def buildChildren(self, child_, nodeName_):
class definition(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if definition.subclass:
return definition.subclass(*args_, **kwargs_)
@@ -2431,6 +2727,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2442,33 +2739,40 @@ def export(self, outfile, level, namespace_='', name_='definition', namespacedef
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='definition'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='definition'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2476,21 +2780,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class definition
class argsstring(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if argsstring.subclass:
return argsstring.subclass(*args_, **kwargs_)
@@ -2499,6 +2807,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2510,33 +2819,40 @@ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='argsstring'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2544,21 +2860,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class argsstring
class read(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if read.subclass:
return read.subclass(*args_, **kwargs_)
@@ -2567,6 +2887,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2578,33 +2899,40 @@ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='read'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='read'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='read'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2612,21 +2940,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class read
class write(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if write.subclass:
return write.subclass(*args_, **kwargs_)
@@ -2635,6 +2967,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2646,33 +2979,40 @@ def export(self, outfile, level, namespace_='', name_='write', namespacedef_='')
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='write'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='write'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='write'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2680,21 +3020,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class write
class bitfield(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if bitfield.subclass:
return bitfield.subclass(*args_, **kwargs_)
@@ -2703,6 +3047,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -2714,33 +3059,40 @@ def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='bitfield'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -2748,19 +3100,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class bitfield
class descriptionType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -2770,6 +3125,7 @@ def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if descriptionType.subclass:
return descriptionType.subclass(*args_, **kwargs_)
@@ -2788,35 +3144,43 @@ def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
+
def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='descriptionType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect1 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='descriptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -2842,46 +3206,49 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
+ nodeName_ == 'sect1':
childobj_ = docSect1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect1', childobj_)
+ MixedContainer.TypeNone, 'sect1', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class descriptionType
@@ -2889,6 +3256,7 @@ def buildChildren(self, child_, nodeName_):
class enumvalueType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
self.prot = prot
self.id = id
@@ -2900,6 +3268,7 @@ def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescrip
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if enumvalueType.subclass:
return enumvalueType.subclass(*args_, **kwargs_)
@@ -2911,43 +3280,55 @@ def set_name(self, name): self.name = name
def get_initializer(self): return self.initializer
def set_initializer(self, initializer): self.initializer = initializer
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def set_detaileddescription(
+ self, detaileddescription): self.detaileddescription = detaileddescription
+
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='enumvalueType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.name is not None or
self.initializer is not None or
self.briefdescription is not None or
self.detaileddescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='enumvalueType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.prot is not None:
showIndent(outfile, level)
@@ -2955,6 +3336,7 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -2980,51 +3362,54 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
- MixedContainer.TypeString, 'name', valuestr_)
+ MixedContainer.TypeString, 'name', valuestr_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'initializer':
+ nodeName_ == 'initializer':
childobj_ = linkedTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'initializer', childobj_)
+ MixedContainer.TypeNone, 'initializer', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
childobj_ = descriptionType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'briefdescription', childobj_)
+ MixedContainer.TypeNone, 'briefdescription', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
+ nodeName_ == 'detaileddescription':
childobj_ = descriptionType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'detaileddescription', childobj_)
+ MixedContainer.TypeNone, 'detaileddescription', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class enumvalueType
@@ -3032,11 +3417,13 @@ def buildChildren(self, child_, nodeName_):
class templateparamlistType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, param=None):
if param is None:
self.param = []
else:
self.param = param
+
def factory(*args_, **kwargs_):
if templateparamlistType.subclass:
return templateparamlistType.subclass(*args_, **kwargs_)
@@ -3047,10 +3434,12 @@ def get_param(self): return self.param
def set_param(self, param): self.param = param
def add_param(self, value): self.param.append(value)
def insert_param(self, index, value): self.param[index] = value
+
def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='templateparamlistType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -3058,25 +3447,31 @@ def export(self, outfile, level, namespace_='', name_='templateparamlistType', n
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
for param_ in self.param:
param_.export(outfile, level, namespace_, name_='param')
+
def hasContent_(self):
if (
self.param is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='templateparamlistType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('param=[\n')
@@ -3090,17 +3485,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'param':
+ nodeName_ == 'param':
obj_ = paramType.factory()
obj_.build(child_)
self.param.append(obj_)
@@ -3110,6 +3508,7 @@ def buildChildren(self, child_, nodeName_):
class paramType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
self.type_ = type_
self.declname = declname
@@ -3117,6 +3516,7 @@ def __init__(self, type_=None, declname=None, defname=None, array=None, defval=N
self.array = array
self.defval = defval
self.briefdescription = briefdescription
+
def factory(*args_, **kwargs_):
if paramType.subclass:
return paramType.subclass(*args_, **kwargs_)
@@ -3134,7 +3534,10 @@ def set_array(self, array): self.array = array
def get_defval(self): return self.defval
def set_defval(self, defval): self.defval = defval
def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+
+ def set_briefdescription(
+ self, briefdescription): self.briefdescription = briefdescription
+
def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3146,24 +3549,31 @@ def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='paramType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='paramType'):
if self.type_:
self.type_.export(outfile, level, namespace_, name_='type')
if self.declname is not None:
showIndent(outfile, level)
-            outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
+            outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(
+ quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
if self.defname is not None:
showIndent(outfile, level)
-            outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
+            outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(
+ quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
if self.array is not None:
showIndent(outfile, level)
-            outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
+            outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(
+ quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
if self.defval:
self.defval.export(outfile, level, namespace_, name_='defval')
if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ self.briefdescription.export(
+ outfile, level, namespace_, name_='briefdescription')
+
def hasContent_(self):
if (
self.type_ is not None or
@@ -3172,17 +3582,20 @@ def hasContent_(self):
self.array is not None or
self.defval is not None or
self.briefdescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='paramType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
if self.type_:
showIndent(outfile, level)
@@ -3191,11 +3604,14 @@ def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
- outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding))
+ outfile.write('declname=%s,\n' % quote_python(
+ self.declname).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding))
+ outfile.write('defname=%s,\n' % quote_python(
+ self.defname).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding))
+ outfile.write('array=%s,\n' % quote_python(
+ self.array).encode(ExternalEncoding))
if self.defval:
showIndent(outfile, level)
outfile.write('defval=model_.linkedTextType(\n')
@@ -3205,48 +3621,52 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ self.briefdescription.exportLiteral(
+ outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'type':
+ nodeName_ == 'type':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_type(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'declname':
+ nodeName_ == 'declname':
declname_ = ''
for text__content_ in child_.childNodes:
declname_ += text__content_.nodeValue
self.declname = declname_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'defname':
+ nodeName_ == 'defname':
defname_ = ''
for text__content_ in child_.childNodes:
defname_ += text__content_.nodeValue
self.defname = defname_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'array':
+ nodeName_ == 'array':
array_ = ''
for text__content_ in child_.childNodes:
array_ += text__content_.nodeValue
self.array = array_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'defval':
+ nodeName_ == 'defval':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_defval(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
+ nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
@@ -3256,8 +3676,10 @@ def buildChildren(self, child_, nodeName_):
class declname(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if declname.subclass:
return declname.subclass(*args_, **kwargs_)
@@ -3266,6 +3688,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3277,33 +3700,40 @@ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='declname'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='declname'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3311,21 +3741,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class declname
class defname(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if defname.subclass:
return defname.subclass(*args_, **kwargs_)
@@ -3334,6 +3768,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3345,33 +3780,40 @@ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_='
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='defname'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='defname'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3379,21 +3821,25 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class defname
class array(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if array.subclass:
return array.subclass(*args_, **kwargs_)
@@ -3402,6 +3848,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3413,33 +3860,40 @@ def export(self, outfile, level, namespace_='', name_='array', namespacedef_='')
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='array'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='array'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='array'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3447,19 +3901,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class array
class linkedTextType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, ref=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -3469,6 +3926,7 @@ def __init__(self, ref=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if linkedTextType.subclass:
return linkedTextType.subclass(*args_, **kwargs_)
@@ -3479,32 +3937,40 @@ def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
+
def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='linkedTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.ref is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='linkedTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -3512,25 +3978,28 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
+ nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
+ MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class linkedTextType
@@ -3538,11 +4007,13 @@ def buildChildren(self, child_, nodeName_):
class graphType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, node=None):
if node is None:
self.node = []
else:
self.node = node
+
def factory(*args_, **kwargs_):
if graphType.subclass:
return graphType.subclass(*args_, **kwargs_)
@@ -3553,6 +4024,7 @@ def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
+
def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3564,25 +4036,31 @@ def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
+
def hasContent_(self):
if (
self.node is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='graphType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('node=[\n')
@@ -3596,17 +4074,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'node':
+ nodeName_ == 'node':
obj_ = nodeType.factory()
obj_.build(child_)
self.node.append(obj_)
@@ -3616,6 +4097,7 @@ def buildChildren(self, child_, nodeName_):
class nodeType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, label=None, link=None, childnode=None):
self.id = id
self.label = label
@@ -3624,6 +4106,7 @@ def __init__(self, id=None, label=None, link=None, childnode=None):
self.childnode = []
else:
self.childnode = childnode
+
def factory(*args_, **kwargs_):
if nodeType.subclass:
return nodeType.subclass(*args_, **kwargs_)
@@ -3640,6 +4123,7 @@ def add_childnode(self, value): self.childnode.append(value)
def insert_childnode(self, index, value): self.childnode[index] = value
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3651,38 +4135,47 @@ def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
if self.label is not None:
showIndent(outfile, level)
-            outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
+            outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(
+ quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
if self.link:
self.link.export(outfile, level, namespace_, name_='link')
for childnode_ in self.childnode:
childnode_.export(outfile, level, namespace_, name_='childnode')
+
def hasContent_(self):
if (
self.label is not None or
self.link is not None or
self.childnode is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='nodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
+ outfile.write('label=%s,\n' % quote_python(
+ self.label).encode(ExternalEncoding))
if self.link:
showIndent(outfile, level)
outfile.write('link=model_.linkType(\n')
@@ -3701,29 +4194,32 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'label':
+ nodeName_ == 'label':
label_ = ''
for text__content_ in child_.childNodes:
label_ += text__content_.nodeValue
self.label = label_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'link':
+ nodeName_ == 'link':
obj_ = linkType.factory()
obj_.build(child_)
self.set_link(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'childnode':
+ nodeName_ == 'childnode':
obj_ = childnodeType.factory()
obj_.build(child_)
self.childnode.append(obj_)
@@ -3733,8 +4229,10 @@ def buildChildren(self, child_, nodeName_):
class label(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if label.subclass:
return label.subclass(*args_, **kwargs_)
@@ -3743,6 +4241,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3754,33 +4253,40 @@ def export(self, outfile, level, namespace_='', name_='label', namespacedef_='')
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='label'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='label'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='label'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3788,19 +4294,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class label
class childnodeType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, relation=None, refid=None, edgelabel=None):
self.relation = relation
self.refid = refid
@@ -3808,6 +4317,7 @@ def __init__(self, relation=None, refid=None, edgelabel=None):
self.edgelabel = []
else:
self.edgelabel = edgelabel
+
def factory(*args_, **kwargs_):
if childnodeType.subclass:
return childnodeType.subclass(*args_, **kwargs_)
@@ -3822,10 +4332,12 @@ def get_relation(self): return self.relation
def set_relation(self, relation): self.relation = relation
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='childnodeType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -3833,27 +4345,34 @@ def export(self, outfile, level, namespace_='', name_='childnodeType', namespace
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
if self.relation is not None:
outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
for edgelabel_ in self.edgelabel:
showIndent(outfile, level)
-            outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
+            outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(
+ quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
+
def hasContent_(self):
if (
self.edgelabel is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='childnodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.relation is not None:
showIndent(outfile, level)
@@ -3861,30 +4380,35 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('edgelabel=[\n')
level += 1
for edgelabel in self.edgelabel:
showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
+ outfile.write('%s,\n' % quote_python(
+ edgelabel).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('relation'):
self.relation = attrs.get('relation').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'edgelabel':
+ nodeName_ == 'edgelabel':
edgelabel_ = ''
for text__content_ in child_.childNodes:
edgelabel_ += text__content_.nodeValue
@@ -3895,8 +4419,10 @@ def buildChildren(self, child_, nodeName_):
class edgelabel(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if edgelabel.subclass:
return edgelabel.subclass(*args_, **kwargs_)
@@ -3905,6 +4431,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3916,33 +4443,40 @@ def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_
        outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='edgelabel'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -3950,23 +4484,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
-            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+            self.valueOf_ += '<![CDATA[' + child_.nodeValue + ']]>'
# end class edgelabel
class linkType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, external=None, valueOf_=''):
self.refid = refid
self.external = external
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if linkType.subclass:
return linkType.subclass(*args_, **kwargs_)
@@ -3979,6 +4517,7 @@ def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -3990,31 +4529,38 @@ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(
+ self.external).encode(ExternalEncoding), input_name='external'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='linkType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
@@ -4022,9 +4568,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4032,27 +4580,31 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('external'):
self.external = attrs.get('external').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class linkType
class listingType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, codeline=None):
if codeline is None:
self.codeline = []
else:
self.codeline = codeline
+
def factory(*args_, **kwargs_):
if listingType.subclass:
return listingType.subclass(*args_, **kwargs_)
@@ -4063,6 +4615,7 @@ def get_codeline(self): return self.codeline
def set_codeline(self, codeline): self.codeline = codeline
def add_codeline(self, value): self.codeline.append(value)
def insert_codeline(self, index, value): self.codeline[index] = value
+
def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4074,25 +4627,31 @@ def export(self, outfile, level, namespace_='', name_='listingType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
for codeline_ in self.codeline:
codeline_.export(outfile, level, namespace_, name_='codeline')
+
def hasContent_(self):
if (
self.codeline is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='listingType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('codeline=[\n')
@@ -4106,17 +4665,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'codeline':
+ nodeName_ == 'codeline':
obj_ = codelineType.factory()
obj_.build(child_)
self.codeline.append(obj_)
@@ -4126,6 +4688,7 @@ def buildChildren(self, child_, nodeName_):
class codelineType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
self.external = external
self.lineno = lineno
@@ -4135,6 +4698,7 @@ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlig
self.highlight = []
else:
self.highlight = highlight
+
def factory(*args_, **kwargs_):
if codelineType.subclass:
return codelineType.subclass(*args_, **kwargs_)
@@ -4153,6 +4717,7 @@ def get_refkind(self): return self.refkind
def set_refkind(self, refkind): self.refkind = refkind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4164,30 +4729,37 @@ def export(self, outfile, level, namespace_='', name_='codelineType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
if self.external is not None:
outfile.write(' external=%s' % (quote_attrib(self.external), ))
if self.lineno is not None:
- outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
+ outfile.write(' lineno="%s"' % self.format_integer(
+ self.lineno, input_name='lineno'))
if self.refkind is not None:
outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
for highlight_ in self.highlight:
highlight_.export(outfile, level, namespace_, name_='highlight')
+
def hasContent_(self):
if (
self.highlight is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='codelineType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
@@ -4201,6 +4773,7 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('highlight=[\n')
@@ -4214,12 +4787,14 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('external'):
self.external = attrs.get('external').value
@@ -4232,9 +4807,10 @@ def buildAttributes(self, attrs):
self.refkind = attrs.get('refkind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'highlight':
+ nodeName_ == 'highlight':
obj_ = highlightType.factory()
obj_.build(child_)
self.highlight.append(obj_)
@@ -4244,6 +4820,7 @@ def buildChildren(self, child_, nodeName_):
class highlightType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
self.classxx = classxx
if mixedclass_ is None:
@@ -4254,6 +4831,7 @@ def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=N
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if highlightType.subclass:
return highlightType.subclass(*args_, **kwargs_)
@@ -4270,36 +4848,44 @@ def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def get_class(self): return self.classxx
def set_class(self, classxx): self.classxx = classxx
+
def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='highlightType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='highlightType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
if self.classxx is not None:
outfile.write(' class=%s' % (quote_attrib(self.classxx), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.sp is not None or
self.ref is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='highlightType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.classxx is not None:
showIndent(outfile, level)
outfile.write('classxx = "%s",\n' % (self.classxx,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -4313,35 +4899,38 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('class'):
self.classxx = attrs.get('class').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sp':
+ nodeName_ == 'sp':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
- MixedContainer.TypeString, 'sp', valuestr_)
+ MixedContainer.TypeString, 'sp', valuestr_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
+ nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
+ MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class highlightType
@@ -4349,8 +4938,10 @@ def buildChildren(self, child_, nodeName_):
class sp(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if sp.subclass:
return sp.subclass(*args_, **kwargs_)
@@ -4359,6 +4950,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4370,33 +4962,40 @@ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='sp'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='sp'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4404,19 +5003,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class sp
class referenceType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
self.endline = endline
self.startline = startline
@@ -4430,6 +5032,7 @@ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, v
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if referenceType.subclass:
return referenceType.subclass(*args_, **kwargs_)
@@ -4446,42 +5049,53 @@ def get_compoundref(self): return self.compoundref
def set_compoundref(self, compoundref): self.compoundref = compoundref
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='referenceType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='referenceType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
if self.endline is not None:
- outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
+ outfile.write(' endline="%s"' % self.format_integer(
+ self.endline, input_name='endline'))
if self.startline is not None:
- outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
+ outfile.write(' startline="%s"' % self.format_integer(
+ self.startline, input_name='startline'))
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.compoundref is not None:
- outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
+ outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(
+ self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='referenceType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.endline is not None:
showIndent(outfile, level)
@@ -4495,9 +5109,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.compoundref is not None:
showIndent(outfile, level)
outfile.write('compoundref = %s,\n' % (self.compoundref,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4505,6 +5121,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('endline'):
try:
@@ -4520,21 +5137,23 @@ def buildAttributes(self, attrs):
self.refid = attrs.get('refid').value
if attrs.get('compoundref'):
self.compoundref = attrs.get('compoundref').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class referenceType
class locationType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
self.bodystart = bodystart
self.line = line
@@ -4542,6 +5161,7 @@ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=
self.bodyfile = bodyfile
self.file = file
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if locationType.subclass:
return locationType.subclass(*args_, **kwargs_)
@@ -4560,6 +5180,7 @@ def get_file(self): return self.file
def set_file(self, file): self.file = file
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4571,37 +5192,47 @@ def export(self, outfile, level, namespace_='', name_='locationType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
if self.bodystart is not None:
- outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
+ outfile.write(' bodystart="%s"' % self.format_integer(
+ self.bodystart, input_name='bodystart'))
if self.line is not None:
- outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
+ outfile.write(' line="%s"' % self.format_integer(
+ self.line, input_name='line'))
if self.bodyend is not None:
- outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
+ outfile.write(' bodyend="%s"' % self.format_integer(
+ self.bodyend, input_name='bodyend'))
if self.bodyfile is not None:
- outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
+ outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(
+ self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
if self.file is not None:
- outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
+ outfile.write(' file=%s' % (self.format_string(quote_attrib(
+ self.file).encode(ExternalEncoding), input_name='file'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='locationType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.bodystart is not None:
showIndent(outfile, level)
@@ -4618,9 +5249,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.file is not None:
showIndent(outfile, level)
outfile.write('file = %s,\n' % (self.file,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -4628,6 +5261,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('bodystart'):
try:
@@ -4648,17 +5282,19 @@ def buildAttributes(self, attrs):
self.bodyfile = attrs.get('bodyfile').value
if attrs.get('file'):
self.file = attrs.get('file').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class locationType
class docSect1Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -4669,6 +5305,7 @@ def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mi
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect1Type.subclass:
return docSect1Type.subclass(*args_, **kwargs_)
@@ -4689,6 +5326,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4696,31 +5334,38 @@ def export(self, outfile, level, namespace_='', name_='docSect1Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect2 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect1Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -4746,47 +5391,50 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect2':
+ nodeName_ == 'sect2':
childobj_ = docSect2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect2', childobj_)
+ MixedContainer.TypeNone, 'sect2', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect1Type
@@ -4794,6 +5442,7 @@ def buildChildren(self, child_, nodeName_):
class docSect2Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -4804,6 +5453,7 @@ def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mi
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect2Type.subclass:
return docSect2Type.subclass(*args_, **kwargs_)
@@ -4824,6 +5474,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4831,31 +5482,38 @@ def export(self, outfile, level, namespace_='', name_='docSect2Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect3 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect2Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -4881,47 +5539,50 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
+ nodeName_ == 'sect3':
childobj_ = docSect3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
+ MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect2Type
@@ -4929,6 +5590,7 @@ def buildChildren(self, child_, nodeName_):
class docSect3Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -4939,6 +5601,7 @@ def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mi
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect3Type.subclass:
return docSect3Type.subclass(*args_, **kwargs_)
@@ -4959,6 +5622,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -4966,31 +5630,38 @@ def export(self, outfile, level, namespace_='', name_='docSect3Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect4 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect3Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5016,47 +5687,50 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect4':
+ nodeName_ == 'sect4':
childobj_ = docSect4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect4', childobj_)
+ MixedContainer.TypeNone, 'sect4', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect3Type
@@ -5064,6 +5738,7 @@ def buildChildren(self, child_, nodeName_):
class docSect4Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -5074,6 +5749,7 @@ def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=No
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docSect4Type.subclass:
return docSect4Type.subclass(*args_, **kwargs_)
@@ -5090,6 +5766,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5097,30 +5774,37 @@ def export(self, outfile, level, namespace_='', name_='docSect4Type', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSect4Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5140,40 +5824,43 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
+ MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
childobj_ = docInternalS4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
+ MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect4Type
@@ -5181,6 +5868,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5190,6 +5878,7 @@ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalType.subclass:
return docInternalType.subclass(*args_, **kwargs_)
@@ -5204,33 +5893,41 @@ def get_sect1(self): return self.sect1
def set_sect1(self, sect1): self.sect1 = sect1
def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect1 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5244,32 +5941,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
+ nodeName_ == 'sect1':
childobj_ = docSect1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect1', childobj_)
+ MixedContainer.TypeNone, 'sect1', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalType
@@ -5277,6 +5977,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS1Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5286,6 +5987,7 @@ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS1Type.subclass:
return docInternalS1Type.subclass(*args_, **kwargs_)
@@ -5300,33 +6002,41 @@ def get_sect2(self): return self.sect2
def set_sect2(self, sect2): self.sect2 = sect2
def add_sect2(self, value): self.sect2.append(value)
def insert_sect2(self, index, value): self.sect2[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS1Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect2 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS1Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5340,32 +6050,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect2':
+ nodeName_ == 'sect2':
childobj_ = docSect2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect2', childobj_)
+ MixedContainer.TypeNone, 'sect2', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS1Type
@@ -5373,6 +6086,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS2Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5382,6 +6096,7 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS2Type.subclass:
return docInternalS2Type.subclass(*args_, **kwargs_)
@@ -5396,33 +6111,41 @@ def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS2Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect3 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS2Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5436,32 +6159,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
+ nodeName_ == 'sect3':
childobj_ = docSect3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
+ MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS2Type
@@ -5469,6 +6195,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS3Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5478,6 +6205,7 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS3Type.subclass:
return docInternalS3Type.subclass(*args_, **kwargs_)
@@ -5492,33 +6220,41 @@ def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS3Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None or
self.sect3 is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS3Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5532,32 +6268,35 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
+ nodeName_ == 'sect3':
childobj_ = docSect4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
+ MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS3Type
@@ -5565,6 +6304,7 @@ def buildChildren(self, child_, nodeName_):
class docInternalS4Type(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None, mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5574,6 +6314,7 @@ def __init__(self, para=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docInternalS4Type.subclass:
return docInternalS4Type.subclass(*args_, **kwargs_)
@@ -5584,32 +6325,40 @@ def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
+
def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docInternalS4Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -5617,25 +6366,28 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
+ MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS4Type
@@ -5643,6 +6395,7 @@ def buildChildren(self, child_, nodeName_):
class docTitleType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5652,6 +6405,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docTitleType.subclass:
return docTitleType.subclass(*args_, **kwargs_)
@@ -5660,6 +6414,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5667,33 +6422,40 @@ def export(self, outfile, level, namespace_='', name_='docTitleType', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTitleType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5701,23 +6463,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docTitleType
class docParaType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5727,6 +6492,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docParaType.subclass:
return docParaType.subclass(*args_, **kwargs_)
@@ -5735,6 +6501,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5742,33 +6509,40 @@ def export(self, outfile, level, namespace_='', name_='docParaType', namespacede
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParaType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5776,23 +6550,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docParaType
class docMarkupType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -5802,6 +6579,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docMarkupType.subclass:
return docMarkupType.subclass(*args_, **kwargs_)
@@ -5810,40 +6588,49 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docMarkupType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docMarkupType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5851,23 +6638,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docMarkupType
class docURLLink(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
self.url = url
if mixedclass_ is None:
@@ -5878,6 +6668,7 @@ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docURLLink.subclass:
return docURLLink.subclass(*args_, **kwargs_)
@@ -5888,6 +6679,7 @@ def get_url(self): return self.url
def set_url(self, url): self.url = url
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -5895,36 +6687,44 @@ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
if self.url is not None:
- outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
+ outfile.write(' url=%s' % (self.format_string(quote_attrib(
+ self.url).encode(ExternalEncoding), input_name='url'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docURLLink'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.url is not None:
showIndent(outfile, level)
outfile.write('url = %s,\n' % (self.url,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -5932,24 +6732,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('url'):
self.url = attrs.get('url').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docURLLink
class docAnchorType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -5960,6 +6763,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docAnchorType.subclass:
return docAnchorType.subclass(*args_, **kwargs_)
@@ -5970,43 +6774,53 @@ def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docAnchorType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docAnchorType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docAnchorType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6014,24 +6828,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docAnchorType
class docFormulaType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -6042,6 +6859,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docFormulaType.subclass:
return docFormulaType.subclass(*args_, **kwargs_)
@@ -6052,43 +6870,53 @@ def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docFormulaType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docFormulaType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docFormulaType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6096,27 +6924,31 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docFormulaType
class docIndexEntryType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, primaryie=None, secondaryie=None):
self.primaryie = primaryie
self.secondaryie = secondaryie
+
def factory(*args_, **kwargs_):
if docIndexEntryType.subclass:
return docIndexEntryType.subclass(*args_, **kwargs_)
@@ -6127,10 +6959,12 @@ def get_primaryie(self): return self.primaryie
def set_primaryie(self, primaryie): self.primaryie = primaryie
def get_secondaryie(self): return self.secondaryie
def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie
+
def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docIndexEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6138,52 +6972,65 @@ def export(self, outfile, level, namespace_='', name_='docIndexEntryType', names
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'):
if self.primaryie is not None:
showIndent(outfile, level)
-            outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
+            outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(
+                quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
if self.secondaryie is not None:
showIndent(outfile, level)
-            outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
+            outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(
+                quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
+
def hasContent_(self):
if (
self.primaryie is not None or
self.secondaryie is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docIndexEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding))
+ outfile.write('primaryie=%s,\n' % quote_python(
+ self.primaryie).encode(ExternalEncoding))
showIndent(outfile, level)
- outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding))
+ outfile.write('secondaryie=%s,\n' % quote_python(
+ self.secondaryie).encode(ExternalEncoding))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'primaryie':
+ nodeName_ == 'primaryie':
primaryie_ = ''
for text__content_ in child_.childNodes:
primaryie_ += text__content_.nodeValue
self.primaryie = primaryie_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'secondaryie':
+ nodeName_ == 'secondaryie':
secondaryie_ = ''
for text__content_ in child_.childNodes:
secondaryie_ += text__content_.nodeValue
@@ -6194,11 +7041,13 @@ def buildChildren(self, child_, nodeName_):
class docListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, listitem=None):
if listitem is None:
self.listitem = []
else:
self.listitem = listitem
+
def factory(*args_, **kwargs_):
if docListType.subclass:
return docListType.subclass(*args_, **kwargs_)
@@ -6209,6 +7058,7 @@ def get_listitem(self): return self.listitem
def set_listitem(self, listitem): self.listitem = listitem
def add_listitem(self, value): self.listitem.append(value)
def insert_listitem(self, index, value): self.listitem[index] = value
+
def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6220,25 +7070,31 @@ def export(self, outfile, level, namespace_='', name_='docListType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docListType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docListType'):
for listitem_ in self.listitem:
listitem_.export(outfile, level, namespace_, name_='listitem')
+
def hasContent_(self):
if (
self.listitem is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('listitem=[\n')
@@ -6252,17 +7108,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'listitem':
+ nodeName_ == 'listitem':
obj_ = docListItemType.factory()
obj_.build(child_)
self.listitem.append(obj_)
@@ -6272,11 +7131,13 @@ def buildChildren(self, child_, nodeName_):
class docListItemType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, para=None):
if para is None:
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docListItemType.subclass:
return docListItemType.subclass(*args_, **kwargs_)
@@ -6287,10 +7148,12 @@ def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
+
def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docListItemType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docListItemType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6298,25 +7161,31 @@ def export(self, outfile, level, namespace_='', name_='docListItemType', namespa
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docListItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -6330,17 +7199,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -6350,6 +7222,7 @@ def buildChildren(self, child_, nodeName_):
class docSimpleSectType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, title=None, para=None):
self.kind = kind
self.title = title
@@ -6357,6 +7230,7 @@ def __init__(self, kind=None, title=None, para=None):
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docSimpleSectType.subclass:
return docSimpleSectType.subclass(*args_, **kwargs_)
@@ -6371,10 +7245,12 @@ def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
+
def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docSimpleSectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6382,31 +7258,37 @@ def export(self, outfile, level, namespace_='', name_='docSimpleSectType', names
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'):
if self.title:
self.title.export(outfile, level, namespace_, name_='title')
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.title is not None or
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docSimpleSectType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
+
def exportLiteralChildren(self, outfile, level, name_):
if self.title:
showIndent(outfile, level)
@@ -6426,23 +7308,26 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
+ nodeName_ == 'title':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_title(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -6452,8 +7337,10 @@ def buildChildren(self, child_, nodeName_):
class docVarListEntryType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, term=None):
self.term = term
+
def factory(*args_, **kwargs_):
if docVarListEntryType.subclass:
return docVarListEntryType.subclass(*args_, **kwargs_)
@@ -6462,10 +7349,12 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def get_term(self): return self.term
def set_term(self, term): self.term = term
+
def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docVarListEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6473,25 +7362,31 @@ def export(self, outfile, level, namespace_='', name_='docVarListEntryType', nam
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
if self.term:
self.term.export(outfile, level, namespace_, name_='term', )
+
def hasContent_(self):
if (
self.term is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
if self.term:
showIndent(outfile, level)
@@ -6499,17 +7394,20 @@ def exportLiteralChildren(self, outfile, level, name_):
self.term.exportLiteral(outfile, level, name_='term')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'term':
+ nodeName_ == 'term':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_term(obj_)
@@ -6519,8 +7417,10 @@ def buildChildren(self, child_, nodeName_):
class docVariableListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if docVariableListType.subclass:
return docVariableListType.subclass(*args_, **kwargs_)
@@ -6529,10 +7429,12 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docVariableListType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docVariableListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -6540,33 +7442,40 @@ def export(self, outfile, level, namespace_='', name_='docVariableListType', nam
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docVariableListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6574,19 +7483,22 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docVariableListType
class docRefTextType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
self.kindref = kindref
@@ -6599,6 +7511,7 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docRefTextType.subclass:
return docRefTextType.subclass(*args_, **kwargs_)
@@ -6613,40 +7526,49 @@ def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docRefTextType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docRefTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'):
if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.kindref is not None:
outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(
+ self.external).encode(ExternalEncoding), input_name='external'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docRefTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
@@ -6657,9 +7579,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -6667,6 +7591,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
@@ -6674,21 +7599,23 @@ def buildAttributes(self, attrs):
self.kindref = attrs.get('kindref').value
if attrs.get('external'):
self.external = attrs.get('external').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docRefTextType
class docTableType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, rows=None, cols=None, row=None, caption=None):
self.rows = rows
self.cols = cols
@@ -6697,6 +7624,7 @@ def __init__(self, rows=None, cols=None, row=None, caption=None):
else:
self.row = row
self.caption = caption
+
def factory(*args_, **kwargs_):
if docTableType.subclass:
return docTableType.subclass(*args_, **kwargs_)
@@ -6713,6 +7641,7 @@ def get_rows(self): return self.rows
def set_rows(self, rows): self.rows = rows
def get_cols(self): return self.cols
def set_cols(self, cols): self.cols = cols
+
def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6724,29 +7653,36 @@ def export(self, outfile, level, namespace_='', name_='docTableType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'):
if self.rows is not None:
- outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows'))
+ outfile.write(' rows="%s"' % self.format_integer(
+ self.rows, input_name='rows'))
if self.cols is not None:
- outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols'))
+ outfile.write(' cols="%s"' % self.format_integer(
+ self.cols, input_name='cols'))
+
def exportChildren(self, outfile, level, namespace_='', name_='docTableType'):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
if self.caption:
self.caption.export(outfile, level, namespace_, name_='caption')
+
def hasContent_(self):
if (
self.row is not None or
self.caption is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTableType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.rows is not None:
showIndent(outfile, level)
@@ -6754,6 +7690,7 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.cols is not None:
showIndent(outfile, level)
outfile.write('cols = %s,\n' % (self.cols,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('row=[\n')
@@ -6773,12 +7710,14 @@ def exportLiteralChildren(self, outfile, level, name_):
self.caption.exportLiteral(outfile, level, name_='caption')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('rows'):
try:
@@ -6790,14 +7729,15 @@ def buildAttributes(self, attrs):
self.cols = int(attrs.get('cols').value)
except ValueError as exp:
raise ValueError('Bad integer attribute (cols): %s' % exp)
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'row':
+ nodeName_ == 'row':
obj_ = docRowType.factory()
obj_.build(child_)
self.row.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'caption':
+ nodeName_ == 'caption':
obj_ = docCaptionType.factory()
obj_.build(child_)
self.set_caption(obj_)
@@ -6807,11 +7747,13 @@ def buildChildren(self, child_, nodeName_):
class docRowType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, entry=None):
if entry is None:
self.entry = []
else:
self.entry = entry
+
def factory(*args_, **kwargs_):
if docRowType.subclass:
return docRowType.subclass(*args_, **kwargs_)
@@ -6822,6 +7764,7 @@ def get_entry(self): return self.entry
def set_entry(self, entry): self.entry = entry
def add_entry(self, value): self.entry.append(value)
def insert_entry(self, index, value): self.entry[index] = value
+
def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6833,25 +7776,31 @@ def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docRowType'):
for entry_ in self.entry:
entry_.export(outfile, level, namespace_, name_='entry')
+
def hasContent_(self):
if (
self.entry is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docRowType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('entry=[\n')
@@ -6865,17 +7814,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'entry':
+ nodeName_ == 'entry':
obj_ = docEntryType.factory()
obj_.build(child_)
self.entry.append(obj_)
@@ -6885,12 +7837,14 @@ def buildChildren(self, child_, nodeName_):
class docEntryType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, thead=None, para=None):
self.thead = thead
if para is None:
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docEntryType.subclass:
return docEntryType.subclass(*args_, **kwargs_)
@@ -6903,6 +7857,7 @@ def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_thead(self): return self.thead
def set_thead(self, thead): self.thead = thead
+
def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -6914,28 +7869,34 @@ def export(self, outfile, level, namespace_='', name_='docEntryType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'):
if self.thead is not None:
outfile.write(' thead=%s' % (quote_attrib(self.thead), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.thead is not None:
showIndent(outfile, level)
outfile.write('thead = "%s",\n' % (self.thead,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -6949,18 +7910,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('thead'):
self.thead = attrs.get('thead').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -6970,6 +7934,7 @@ def buildChildren(self, child_, nodeName_):
class docCaptionType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
@@ -6979,6 +7944,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docCaptionType.subclass:
return docCaptionType.subclass(*args_, **kwargs_)
@@ -6987,40 +7953,49 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docCaptionType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docCaptionType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docCaptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7028,23 +8003,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docCaptionType
class docHeadingType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
self.level = level
if mixedclass_ is None:
@@ -7055,6 +8033,7 @@ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docHeadingType.subclass:
return docHeadingType.subclass(*args_, **kwargs_)
@@ -7065,43 +8044,53 @@ def get_level(self): return self.level
def set_level(self, level): self.level = level
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docHeadingType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docHeadingType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'):
if self.level is not None:
- outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level'))
+ outfile.write(' level="%s"' % self.format_integer(
+ self.level, input_name='level'))
+
def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docHeadingType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.level is not None:
showIndent(outfile, level)
outfile.write('level = %s,\n' % (self.level,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7109,27 +8098,30 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('level'):
try:
self.level = int(attrs.get('level').value)
except ValueError as exp:
raise ValueError('Bad integer attribute (level): %s' % exp)
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docHeadingType
class docImageType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
self.width = width
self.type_ = type_
@@ -7143,6 +8135,7 @@ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='',
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docImageType.subclass:
return docImageType.subclass(*args_, **kwargs_)
@@ -7159,6 +8152,7 @@ def get_height(self): return self.height
def set_height(self, height): self.height = height
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -7166,35 +8160,43 @@ def export(self, outfile, level, namespace_='', name_='docImageType', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'):
if self.width is not None:
- outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), ))
+ outfile.write(' width=%s' % (self.format_string(quote_attrib(
+ self.width).encode(ExternalEncoding), input_name='width'), ))
if self.type_ is not None:
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.name is not None:
- outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(
+ self.name).encode(ExternalEncoding), input_name='name'), ))
if self.height is not None:
- outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), ))
+ outfile.write(' height=%s' % (self.format_string(quote_attrib(
+ self.height).encode(ExternalEncoding), input_name='height'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docImageType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','')
+ if self.valueOf_.find('![CDATA') > -1:
+ value = quote_xml('%s' % self.valueOf_)
+ value = value.replace('![CDATA', '')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docImageType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.width is not None:
showIndent(outfile, level)
@@ -7208,9 +8210,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.height is not None:
showIndent(outfile, level)
outfile.write('height = %s,\n' % (self.height,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7218,6 +8222,7 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('width'):
self.width = attrs.get('width').value
@@ -7227,21 +8232,23 @@ def buildAttributes(self, attrs):
self.name = attrs.get('name').value
if attrs.get('height'):
self.height = attrs.get('height').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docImageType
class docDotFileType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
self.name = name
if mixedclass_ is None:
@@ -7252,6 +8259,7 @@ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docDotFileType.subclass:
return docDotFileType.subclass(*args_, **kwargs_)
@@ -7262,43 +8270,53 @@ def get_name(self): return self.name
def set_name(self, name): self.name = name
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docDotFileType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
if self.name is not None:
- outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(
+ self.name).encode(ExternalEncoding), input_name='name'), ))
+
    def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
-        if self.valueOf_.find('![CDATA')>-1:
-            value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docDotFileType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name = %s,\n' % (self.name,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7306,24 +8324,27 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('name'):
self.name = attrs.get('name').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docDotFileType
class docTocItemType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.id = id
if mixedclass_ is None:
@@ -7334,6 +8355,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docTocItemType.subclass:
return docTocItemType.subclass(*args_, **kwargs_)
@@ -7344,43 +8366,53 @@ def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docTocItemType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
    def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
-        if self.valueOf_.find('![CDATA')>-1:
-            value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTocItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -7388,29 +8420,33 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docTocItemType
class docTocListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, tocitem=None):
if tocitem is None:
self.tocitem = []
else:
self.tocitem = tocitem
+
def factory(*args_, **kwargs_):
if docTocListType.subclass:
return docTocListType.subclass(*args_, **kwargs_)
@@ -7421,10 +8457,12 @@ def get_tocitem(self): return self.tocitem
def set_tocitem(self, tocitem): self.tocitem = tocitem
def add_tocitem(self, value): self.tocitem.append(value)
def insert_tocitem(self, index, value): self.tocitem[index] = value
+
def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docTocListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7432,25 +8470,31 @@ def export(self, outfile, level, namespace_='', name_='docTocListType', namespac
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
for tocitem_ in self.tocitem:
tocitem_.export(outfile, level, namespace_, name_='tocitem')
+
def hasContent_(self):
if (
self.tocitem is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docTocListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('tocitem=[\n')
@@ -7464,17 +8508,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'tocitem':
+ nodeName_ == 'tocitem':
obj_ = docTocItemType.factory()
obj_.build(child_)
self.tocitem.append(obj_)
@@ -7484,12 +8531,14 @@ def buildChildren(self, child_, nodeName_):
class docLanguageType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, langid=None, para=None):
self.langid = langid
if para is None:
self.para = []
else:
self.para = para
+
def factory(*args_, **kwargs_):
if docLanguageType.subclass:
return docLanguageType.subclass(*args_, **kwargs_)
@@ -7502,10 +8551,12 @@ def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_langid(self): return self.langid
def set_langid(self, langid): self.langid = langid
+
def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docLanguageType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7513,28 +8564,35 @@ def export(self, outfile, level, namespace_='', name_='docLanguageType', namespa
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
if self.langid is not None:
- outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
+ outfile.write(' langid=%s' % (self.format_string(quote_attrib(
+ self.langid).encode(ExternalEncoding), input_name='langid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
+
def hasContent_(self):
if (
self.para is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docLanguageType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.langid is not None:
showIndent(outfile, level)
outfile.write('langid = %s,\n' % (self.langid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -7548,18 +8606,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('langid'):
self.langid = attrs.get('langid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
@@ -7569,12 +8630,14 @@ def buildChildren(self, child_, nodeName_):
class docParamListType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, parameteritem=None):
self.kind = kind
if parameteritem is None:
self.parameteritem = []
else:
self.parameteritem = parameteritem
+
def factory(*args_, **kwargs_):
if docParamListType.subclass:
return docParamListType.subclass(*args_, **kwargs_)
@@ -7582,15 +8645,21 @@ def factory(*args_, **kwargs_):
return docParamListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parameteritem(self): return self.parameteritem
- def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
+ def set_parameteritem(
+ self, parameteritem): self.parameteritem = parameteritem
+
def add_parameteritem(self, value): self.parameteritem.append(value)
- def insert_parameteritem(self, index, value): self.parameteritem[index] = value
+ def insert_parameteritem(
+ self, index, value): self.parameteritem[index] = value
+
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
+
def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docParamListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7598,28 +8667,35 @@ def export(self, outfile, level, namespace_='', name_='docParamListType', namesp
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
for parameteritem_ in self.parameteritem:
- parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
+ parameteritem_.export(
+ outfile, level, namespace_, name_='parameteritem')
+
def hasContent_(self):
if (
self.parameteritem is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parameteritem=[\n')
@@ -7633,18 +8709,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameteritem':
+ nodeName_ == 'parameteritem':
obj_ = docParamListItem.factory()
obj_.build(child_)
self.parameteritem.append(obj_)
@@ -7654,12 +8733,14 @@ def buildChildren(self, child_, nodeName_):
class docParamListItem(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, parameternamelist=None, parameterdescription=None):
if parameternamelist is None:
self.parameternamelist = []
else:
self.parameternamelist = parameternamelist
self.parameterdescription = parameterdescription
+
def factory(*args_, **kwargs_):
if docParamListItem.subclass:
return docParamListItem.subclass(*args_, **kwargs_)
@@ -7667,15 +8748,25 @@ def factory(*args_, **kwargs_):
return docParamListItem(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parameternamelist(self): return self.parameternamelist
- def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
- def add_parameternamelist(self, value): self.parameternamelist.append(value)
- def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
+
+ def set_parameternamelist(
+ self, parameternamelist): self.parameternamelist = parameternamelist
+
+ def add_parameternamelist(
+ self, value): self.parameternamelist.append(value)
+ def insert_parameternamelist(
+ self, index, value): self.parameternamelist[index] = value
+
def get_parameterdescription(self): return self.parameterdescription
- def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
+
+ def set_parameterdescription(
+ self, parameterdescription): self.parameterdescription = parameterdescription
+
def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docParamListItem')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7683,28 +8774,36 @@ def export(self, outfile, level, namespace_='', name_='docParamListItem', namesp
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
for parameternamelist_ in self.parameternamelist:
- parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
+ parameternamelist_.export(
+ outfile, level, namespace_, name_='parameternamelist')
if self.parameterdescription:
- self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
+ self.parameterdescription.export(
+ outfile, level, namespace_, name_='parameterdescription', )
+
def hasContent_(self):
if (
self.parameternamelist is not None or
self.parameterdescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamListItem'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parameternamelist=[\n')
@@ -7712,7 +8811,8 @@ def exportLiteralChildren(self, outfile, level, name_):
for parameternamelist in self.parameternamelist:
showIndent(outfile, level)
outfile.write('model_.parameternamelist(\n')
- parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
+ parameternamelist.exportLiteral(
+ outfile, level, name_='parameternamelist')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
@@ -7721,25 +8821,29 @@ def exportLiteralChildren(self, outfile, level, name_):
if self.parameterdescription:
showIndent(outfile, level)
outfile.write('parameterdescription=model_.descriptionType(\n')
- self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
+ self.parameterdescription.exportLiteral(
+ outfile, level, name_='parameterdescription')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameternamelist':
+ nodeName_ == 'parameternamelist':
obj_ = docParamNameList.factory()
obj_.build(child_)
self.parameternamelist.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameterdescription':
+ nodeName_ == 'parameterdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_parameterdescription(obj_)
@@ -7749,11 +8853,13 @@ def buildChildren(self, child_, nodeName_):
class docParamNameList(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, parametername=None):
if parametername is None:
self.parametername = []
else:
self.parametername = parametername
+
def factory(*args_, **kwargs_):
if docParamNameList.subclass:
return docParamNameList.subclass(*args_, **kwargs_)
@@ -7761,13 +8867,19 @@ def factory(*args_, **kwargs_):
return docParamNameList(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parametername(self): return self.parametername
- def set_parametername(self, parametername): self.parametername = parametername
+ def set_parametername(
+ self, parametername): self.parametername = parametername
+
def add_parametername(self, value): self.parametername.append(value)
- def insert_parametername(self, index, value): self.parametername[index] = value
+
+ def insert_parametername(
+ self, index, value): self.parametername[index] = value
+
def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docParamNameList')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7775,25 +8887,32 @@ def export(self, outfile, level, namespace_='', name_='docParamNameList', namesp
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
pass
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
for parametername_ in self.parametername:
- parametername_.export(outfile, level, namespace_, name_='parametername')
+ parametername_.export(
+ outfile, level, namespace_, name_='parametername')
+
def hasContent_(self):
if (
self.parametername is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamNameList'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parametername=[\n')
@@ -7807,17 +8926,20 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parametername':
+ nodeName_ == 'parametername':
obj_ = docParamName.factory()
obj_.build(child_)
self.parametername.append(obj_)
@@ -7827,6 +8949,7 @@ def buildChildren(self, child_, nodeName_):
class docParamName(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
self.direction = direction
if mixedclass_ is None:
@@ -7837,6 +8960,7 @@ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
self.content_ = []
else:
self.content_ = content_
+
def factory(*args_, **kwargs_):
if docParamName.subclass:
return docParamName.subclass(*args_, **kwargs_)
@@ -7847,6 +8971,7 @@ def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def get_direction(self): return self.direction
def set_direction(self, direction): self.direction = direction
+
def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -7854,28 +8979,34 @@ def export(self, outfile, level, namespace_='', name_='docParamName', namespaced
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
+
def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
if self.direction is not None:
outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
+
def hasContent_(self):
if (
self.ref is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docParamName'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.direction is not None:
showIndent(outfile, level)
outfile.write('direction = "%s",\n' % (self.direction,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
@@ -7883,26 +9014,29 @@ def exportLiteralChildren(self, outfile, level, name_):
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('direction'):
self.direction = attrs.get('direction').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
+ nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
+ MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
+ MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docParamName
@@ -7910,6 +9044,7 @@ def buildChildren(self, child_, nodeName_):
class docXRefSectType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
self.id = id
if xreftitle is None:
@@ -7917,6 +9052,7 @@ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
else:
self.xreftitle = xreftitle
self.xrefdescription = xrefdescription
+
def factory(*args_, **kwargs_):
if docXRefSectType.subclass:
return docXRefSectType.subclass(*args_, **kwargs_)
@@ -7928,13 +9064,17 @@ def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
def add_xreftitle(self, value): self.xreftitle.append(value)
def insert_xreftitle(self, index, value): self.xreftitle[index] = value
def get_xrefdescription(self): return self.xrefdescription
- def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
+ def set_xrefdescription(
+ self, xrefdescription): self.xrefdescription = xrefdescription
+
def get_id(self): return self.id
def set_id(self, id): self.id = id
+
def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
+ self.exportAttributes(outfile, level, namespace_,
+ name_='docXRefSectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
@@ -7942,66 +9082,80 @@ def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespa
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(
+ self.id).encode(ExternalEncoding), input_name='id'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
for xreftitle_ in self.xreftitle:
            showIndent(outfile, level)
-            outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
+            outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(
+                quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
if self.xrefdescription:
- self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
+ self.xrefdescription.export(
+ outfile, level, namespace_, name_='xrefdescription', )
+
def hasContent_(self):
if (
self.xreftitle is not None or
self.xrefdescription is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docXRefSectType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('xreftitle=[\n')
level += 1
for xreftitle in self.xreftitle:
showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
+ outfile.write('%s,\n' % quote_python(
+ xreftitle).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.xrefdescription:
showIndent(outfile, level)
outfile.write('xrefdescription=model_.descriptionType(\n')
- self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
+ self.xrefdescription.exportLiteral(
+ outfile, level, name_='xrefdescription')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'xreftitle':
+ nodeName_ == 'xreftitle':
xreftitle_ = ''
for text__content_ in child_.childNodes:
xreftitle_ += text__content_.nodeValue
self.xreftitle.append(xreftitle_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'xrefdescription':
+ nodeName_ == 'xrefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_xrefdescription(obj_)
@@ -8011,6 +9165,7 @@ def buildChildren(self, child_, nodeName_):
class docCopyType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, link=None, para=None, sect1=None, internal=None):
self.link = link
if para is None:
@@ -8022,6 +9177,7 @@ def __init__(self, link=None, para=None, sect1=None, internal=None):
else:
self.sect1 = sect1
self.internal = internal
+
def factory(*args_, **kwargs_):
if docCopyType.subclass:
return docCopyType.subclass(*args_, **kwargs_)
@@ -8040,6 +9196,7 @@ def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_link(self): return self.link
def set_link(self, link): self.link = link
+
def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -8051,9 +9208,12 @@ def export(self, outfile, level, namespace_='', name_='docCopyType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
if self.link is not None:
- outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
+ outfile.write(' link=%s' % (self.format_string(quote_attrib(
+ self.link).encode(ExternalEncoding), input_name='link'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
@@ -8061,24 +9221,28 @@ def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
sect1_.export(outfile, level, namespace_, name_='sect1')
if self.internal:
self.internal.export(outfile, level, namespace_, name_='internal')
+
def hasContent_(self):
if (
self.para is not None or
self.sect1 is not None or
self.internal is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docCopyType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.link is not None:
showIndent(outfile, level)
outfile.write('link = %s,\n' % (self.link,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
@@ -8110,28 +9274,31 @@ def exportLiteralChildren(self, outfile, level, name_):
self.internal.exportLiteral(outfile, level, name_='internal')
showIndent(outfile, level)
outfile.write('),\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('link'):
self.link = attrs.get('link').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
+ nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
+ nodeName_ == 'sect1':
obj_ = docSect1Type.factory()
obj_.build(child_)
self.sect1.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
+ nodeName_ == 'internal':
obj_ = docInternalType.factory()
obj_.build(child_)
self.set_internal(obj_)
@@ -8141,9 +9308,11 @@ def buildChildren(self, child_, nodeName_):
class docCharType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, char=None, valueOf_=''):
self.char = char
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if docCharType.subclass:
return docCharType.subclass(*args_, **kwargs_)
@@ -8154,6 +9323,7 @@ def get_char(self): return self.char
def set_char(self, char): self.char = char
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -8165,36 +9335,43 @@ def export(self, outfile, level, namespace_='', name_='docCharType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
if self.char is not None:
outfile.write(' char=%s' % (quote_attrib(self.char), ))
+
    def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
-        if self.valueOf_.find('![CDATA')>-1:
-            value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docCharType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.char is not None:
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -8202,22 +9379,26 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('char'):
self.char = attrs.get('char').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docCharType
class docEmptyType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
+
def factory(*args_, **kwargs_):
if docEmptyType.subclass:
return docEmptyType.subclass(*args_, **kwargs_)
@@ -8226,6 +9407,7 @@ def factory(*args_, **kwargs_):
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+
def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -8237,33 +9419,40 @@ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
pass
+
    def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
-        if self.valueOf_.find('![CDATA')>-1:
-            value=quote_xml('%s' % self.valueOf_)
-            value=value.replace('![CDATA','<![CDATA')
-            value=value.replace(']]',']]>')
+        if self.valueOf_.find('![CDATA') > -1:
+            value = quote_xml('%s' % self.valueOf_)
+            value = value.replace('![CDATA', '<![CDATA')
+            value = value.replace(']]', ']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
+
def hasContent_(self):
if (
self.valueOf_ is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='docEmptyType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
pass
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
@@ -8271,13 +9460,15 @@ def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
pass
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+ self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class docEmptyType
@@ -8287,6 +9478,7 @@ def buildChildren(self, child_, nodeName_):
-s Use the SAX parser, not the minidom parser.
"""
+
def usage():
print(USAGE_TEXT)
sys.exit(1)
@@ -8301,7 +9493,7 @@ def parse(inFileName):
doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygen",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -8314,7 +9506,7 @@ def parseString(inString):
doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygen",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -8343,4 +9535,4 @@ def main():
if __name__ == '__main__':
main()
#import pdb
- #pdb.run('main()')
+ # pdb.run('main()')
diff --git a/docs/doxygen/doxyxml/generated/compoundsuper.pyc b/docs/doxygen/doxyxml/generated/compoundsuper.pyc
deleted file mode 100644
index a715daa..0000000
Binary files a/docs/doxygen/doxyxml/generated/compoundsuper.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/generated/index.py b/docs/doxygen/doxyxml/generated/index.py
index 0c63512..7ffbdf1 100644
--- a/docs/doxygen/doxyxml/generated/index.py
+++ b/docs/doxygen/doxyxml/generated/index.py
@@ -3,8 +3,6 @@
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
from xml.dom import minidom
@@ -14,6 +12,7 @@
from . import indexsuper as supermod
+
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compound=None):
supermod.DoxygenType.__init__(self, version, compound)
@@ -34,6 +33,7 @@ def find_compounds_and_members(self, details):
return results
+
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
@@ -55,6 +55,7 @@ def find_members(self, details):
return results
+
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
@@ -64,6 +65,7 @@ class MemberTypeSub(supermod.MemberType):
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
+
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
@@ -76,4 +78,3 @@ def parse(inFilename):
rootObj.build(rootNode)
return rootObj
-
diff --git a/docs/doxygen/doxyxml/generated/index.pyc b/docs/doxygen/doxyxml/generated/index.pyc
deleted file mode 100644
index f3e0c90..0000000
Binary files a/docs/doxygen/doxyxml/generated/index.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/generated/indexsuper.py b/docs/doxygen/doxyxml/generated/indexsuper.py
index 11312db..b30e062 100644
--- a/docs/doxygen/doxyxml/generated/indexsuper.py
+++ b/docs/doxygen/doxyxml/generated/indexsuper.py
@@ -4,16 +4,12 @@
# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
#
-from __future__ import print_function
-from __future__ import unicode_literals
import sys
from xml.dom import minidom
from xml.dom import Node
-import six
-
#
# User methods
#
@@ -28,12 +24,16 @@
class GeneratedsSuper(object):
def format_string(self, input_data, input_name=''):
return input_data
+
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
+
def format_float(self, input_data, input_name=''):
return '%f' % input_data
+
def format_double(self, input_data, input_name=''):
return '%e' % input_data
+
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
@@ -45,9 +45,9 @@ def format_boolean(self, input_data, input_name=''):
## from IPython.Shell import IPShellEmbed
## args = ''
-## ipshell = IPShellEmbed(args,
+# ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
+# exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
@@ -63,20 +63,23 @@ def format_boolean(self, input_data, input_name=''):
# Support/utility functions.
#
+
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
+
def quote_xml(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
+
def quote_attrib(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
+ s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
@@ -90,6 +93,7 @@ def quote_attrib(inStr):
s1 = '"%s"' % s1
return s1
+
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
@@ -121,26 +125,33 @@ class MixedContainer(object):
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
+
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
+
def getCategory(self):
return self.category
+
def getContenttype(self, content_type):
return self.content_type
+
def getValue(self):
return self.value
+
def getName(self):
return self.name
+
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
- self.value.export(outfile, level, namespace,name)
+ self.value.export(outfile, level, namespace, name)
+
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
@@ -152,19 +163,20 @@ def exportSimple(self, outfile, level, name):
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+ (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s",\n' % \
- (self.category, self.content_type, self.name,))
+ outfile.write('MixedContainer(%d, %d, "%s",\n' %
+ (self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
@@ -175,6 +187,7 @@ def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
+
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
@@ -190,12 +203,14 @@ def get_container(self): return self.container
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, version=None, compound=None):
self.version = version
if compound is None:
self.compound = []
else:
self.compound = compound
+
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
@@ -208,6 +223,7 @@ def add_compound(self, value): self.compound.append(value)
def insert_compound(self, index, value): self.compound[index] = value
def get_version(self): return self.version
def set_version(self, version): self.version = version
+
def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -219,27 +235,34 @@ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacede
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
- outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
+ outfile.write(' version=%s' % (self.format_string(quote_attrib(
+ self.version).encode(ExternalEncoding), input_name='version'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
for compound_ in self.compound:
compound_.export(outfile, level, namespace_, name_='compound')
+
def hasContent_(self):
if (
self.compound is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='DoxygenType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.version is not None:
showIndent(outfile, level)
outfile.write('version = %s,\n' % (self.version,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('compound=[\n')
@@ -253,18 +276,21 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compound':
+ nodeName_ == 'compound':
obj_ = CompoundType.factory()
obj_.build(child_)
self.compound.append(obj_)
@@ -274,6 +300,7 @@ def buildChildren(self, child_, nodeName_):
class CompoundType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, refid=None, name=None, member=None):
self.kind = kind
self.refid = refid
@@ -282,6 +309,7 @@ def __init__(self, kind=None, refid=None, name=None, member=None):
self.member = []
else:
self.member = member
+
def factory(*args_, **kwargs_):
if CompoundType.subclass:
return CompoundType.subclass(*args_, **kwargs_)
@@ -298,6 +326,7 @@ def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -309,28 +338,35 @@ def export(self, outfile, level, namespace_='', name_='CompoundType', namespaced
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
if self.name is not None:
            showIndent(outfile, level)
-            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+                quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
+
def hasContent_(self):
if (
self.name is not None or
self.member is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='CompoundType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
@@ -338,9 +374,11 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
@@ -353,26 +391,29 @@ def exportLiteralChildren(self, outfile, level, name_):
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'member':
+ nodeName_ == 'member':
obj_ = MemberType.factory()
obj_.build(child_)
self.member.append(obj_)
@@ -382,10 +423,12 @@ def buildChildren(self, child_, nodeName_):
class MemberType(GeneratedsSuper):
subclass = None
superclass = None
+
def __init__(self, kind=None, refid=None, name=None):
self.kind = kind
self.refid = refid
self.name = name
+
def factory(*args_, **kwargs_):
if MemberType.subclass:
return MemberType.subclass(*args_, **kwargs_)
@@ -398,6 +441,7 @@ def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
+
def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -409,25 +453,32 @@ def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef
            outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
+
def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(
+ self.refid).encode(ExternalEncoding), input_name='refid'), ))
+
def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
if self.name is not None:
            showIndent(outfile, level)
-            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(
+                quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+
def hasContent_(self):
if (
self.name is not None
- ):
+ ):
return True
else:
return False
+
def exportLiteral(self, outfile, level, name_='MemberType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
+
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
@@ -435,23 +486,28 @@ def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
+
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ outfile.write('name=%s,\n' % quote_python(
+ self.name).encode(ExternalEncoding))
+
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
+
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
+
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
+ nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
@@ -465,6 +521,7 @@ def buildChildren(self, child_, nodeName_):
-s Use the SAX parser, not the minidom parser.
"""
+
def usage():
print(USAGE_TEXT)
sys.exit(1)
@@ -479,7 +536,7 @@ def parse(inFileName):
doc = None
sys.stdout.write('\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -492,7 +549,7 @@ def parseString(inString):
doc = None
sys.stdout.write('\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
- namespacedef_='')
+ namespacedef_='')
return rootObj
@@ -518,9 +575,7 @@ def main():
usage()
-
-
if __name__ == '__main__':
main()
#import pdb
- #pdb.run('main()')
+ # pdb.run('main()')
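
The hunks above only reflow the generateDS-produced index parser; the parsing scheme is unchanged: buildAttributes() reads DOM attributes and buildChildren() walks childNodes, concatenating text nodes. A minimal standalone sketch of that same minidom walk (the sample XML, element names and values below are purely illustrative and not taken from this patch):

    from xml.dom.minidom import parseString
    from xml.dom import Node

    # Hand-written sample; a real doxygen index.xml entry has the same shape.
    xml = '<compound refid="classfoo" kind="class"><name>foo</name></compound>'
    root = parseString(xml).documentElement

    kind = root.attributes.get('kind').value              # cf. buildAttributes()
    name = ''
    for child in root.childNodes:                          # cf. buildChildren()
        if child.nodeType == Node.ELEMENT_NODE and \
                child.nodeName.split(':')[-1] == 'name':
            name = ''.join(t.nodeValue for t in child.childNodes)

    print(kind, name)   # -> class foo
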
diff --git a/docs/doxygen/doxyxml/generated/indexsuper.pyc b/docs/doxygen/doxyxml/generated/indexsuper.pyc
deleted file mode 100644
index ad401d2..0000000
Binary files a/docs/doxygen/doxyxml/generated/indexsuper.pyc and /dev/null differ
diff --git a/docs/doxygen/doxyxml/text.py b/docs/doxygen/doxyxml/text.py
index 87efd20..96c5648 100644
--- a/docs/doxygen/doxyxml/text.py
+++ b/docs/doxygen/doxyxml/text.py
@@ -4,25 +4,13 @@
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
Utilities for extracting text from generated classes.
"""
-from __future__ import unicode_literals
+
def is_string(txt):
if isinstance(txt, str):
@@ -34,11 +22,13 @@ def is_string(txt):
pass
return False
+
def description(obj):
if obj is None:
return None
return description_bit(obj).strip()
+
def description_bit(obj):
if hasattr(obj, 'content'):
contents = [description_bit(item) for item in obj.content]
@@ -51,7 +41,8 @@ def description_bit(obj):
elif is_string(obj):
return obj
else:
- raise Exception('Expecting a string or something with content, content_ or value attribute')
+ raise Exception(
+ 'Expecting a string or something with content, content_ or value attribute')
# If this bit is a paragraph then add one some line breaks.
if hasattr(obj, 'name') and obj.name == 'para':
result += "\n\n"
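
For reference, description() above simply strips the result of description_bit(), which recurses through any object exposing a content/content_ list (or a value attribute), passes plain strings through, and appends a blank line after each 'para' node. A simplified, illustrative restatement of that recursion (the Para stand-in class is hypothetical, not part of the patch):

    # Illustrative only: mirrors the recursion in description_bit() above.
    class Para:
        name = 'para'
        def __init__(self, content):
            self.content = content

    def describe(obj):
        if hasattr(obj, 'content'):
            text = ''.join(describe(item) for item in obj.content)
            if getattr(obj, 'name', None) == 'para':
                text += '\n\n'            # paragraph break, as in the original
            return text
        return obj                         # assume a plain string

    doc = Para(['Utilities for extracting text ', Para(['from generated classes.'])])
    print(describe(doc).strip())
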
diff --git a/docs/doxygen/doxyxml/text.pyc b/docs/doxygen/doxyxml/text.pyc
deleted file mode 100644
index 72bd36f..0000000
Binary files a/docs/doxygen/doxyxml/text.pyc and /dev/null differ
diff --git a/docs/doxygen/other/doxypy.py b/docs/doxygen/other/doxypy.py
new file mode 100644
index 0000000..28b1664
--- /dev/null
+++ b/docs/doxygen/other/doxypy.py
@@ -0,0 +1,446 @@
+#!/usr/bin/env python
+
+
+__applicationName__ = "doxypy"
+__blurb__ = """
+doxypy is an input filter for Doxygen. It preprocesses python
+files so that docstrings of classes and functions are reformatted
+into Doxygen-conform documentation blocks.
+"""
+
+__doc__ = __blurb__ + \
+ """
+In order to make Doxygen preprocess files through doxypy, simply
+add the following lines to your Doxyfile:
+ FILTER_SOURCE_FILES = YES
+ INPUT_FILTER = "python /path/to/doxypy.py"
+"""
+
+__version__ = "0.4.2"
+__date__ = "5th December 2008"
+__website__ = "http://code.foosel.org/doxypy"
+
+__author__ = (
+ "Philippe 'demod' Neumann (doxypy at demod dot org)",
+ "Gina 'foosel' Haeussge (gina at foosel dot net)"
+)
+
+__licenseName__ = "GPL v2"
+__license__ = """This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import sys
+import re
+
+from argparse import ArgumentParser
+
+
+class FSM(object):
+ """Implements a finite state machine.
+
+ Transitions are given as 4-tuples, consisting of an origin state, a target
+ state, a condition for the transition (given as a reference to a function
+ which gets called with a given piece of input) and a pointer to a function
+ to be called upon the execution of the given transition.
+ """
+
+ """
+ @var transitions holds the transitions
+ @var current_state holds the current state
+ @var current_input holds the current input
+ @var current_transition hold the currently active transition
+ """
+
+ def __init__(self, start_state=None, transitions=[]):
+ self.transitions = transitions
+ self.current_state = start_state
+ self.current_input = None
+ self.current_transition = None
+
+ def setStartState(self, state):
+ self.current_state = state
+
+ def addTransition(self, from_state, to_state, condition, callback):
+ self.transitions.append([from_state, to_state, condition, callback])
+
+ def makeTransition(self, input):
+ """ Makes a transition based on the given input.
+
+ @param input input to parse by the FSM
+ """
+ for transition in self.transitions:
+ [from_state, to_state, condition, callback] = transition
+ if from_state == self.current_state:
+ match = condition(input)
+ if match:
+ self.current_state = to_state
+ self.current_input = input
+ self.current_transition = transition
+ if args.debug:
+ print("# FSM: executing (%s -> %s) for line '%s'" %
+ (from_state, to_state, input), file=sys.stderr)
+ callback(match)
+ return
+
+
+class Doxypy(object):
+ def __init__(self):
+ string_prefixes = "[uU]?[rR]?"
+
+ self.start_single_comment_re = re.compile(
+ r"^\s*%s(''')" % string_prefixes)
+ self.end_single_comment_re = re.compile(r"(''')\s*$")
+
+ self.start_double_comment_re = re.compile(
+ r'^\s*%s(""")' % string_prefixes)
+ self.end_double_comment_re = re.compile(r'(""")\s*$')
+
+ self.single_comment_re = re.compile(
+ r"^\s*%s(''').*(''')\s*$" % string_prefixes)
+ self.double_comment_re = re.compile(
+ r'^\s*%s(""").*(""")\s*$' % string_prefixes)
+
+ self.defclass_re = re.compile(r"^(\s*)(def .+:|class .+:)")
+ self.empty_re = re.compile(r"^\s*$")
+ self.hashline_re = re.compile(r"^\s*#.*$")
+ self.importline_re = re.compile(r"^\s*(import |from .+ import)")
+
+ self.multiline_defclass_start_re = re.compile(
+ r"^(\s*)(def|class)(\s.*)?$")
+ self.multiline_defclass_end_re = re.compile(r":\s*$")
+
+ # Transition list format
+ # ["FROM", "TO", condition, action]
+ transitions = [
+ # FILEHEAD
+
+ # single line comments
+ ["FILEHEAD", "FILEHEAD", self.single_comment_re.search,
+ self.appendCommentLine],
+ ["FILEHEAD", "FILEHEAD", self.double_comment_re.search,
+ self.appendCommentLine],
+
+ # multiline comments
+ ["FILEHEAD", "FILEHEAD_COMMENT_SINGLE",
+ self.start_single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD",
+ self.end_single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD_COMMENT_SINGLE",
+ self.catchall, self.appendCommentLine],
+ ["FILEHEAD", "FILEHEAD_COMMENT_DOUBLE",
+ self.start_double_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD",
+ self.end_double_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD_COMMENT_DOUBLE",
+ self.catchall, self.appendCommentLine],
+
+ # other lines
+ ["FILEHEAD", "FILEHEAD", self.empty_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "FILEHEAD", self.hashline_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "FILEHEAD", self.importline_re.search,
+ self.appendFileheadLine],
+ ["FILEHEAD", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
+ ["FILEHEAD", "DEFCLASS_MULTI",
+ self.multiline_defclass_start_re.search, self.resetCommentSearch],
+ ["FILEHEAD", "DEFCLASS_BODY", self.catchall, self.appendFileheadLine],
+
+ # DEFCLASS
+
+ # single line comments
+ ["DEFCLASS", "DEFCLASS_BODY",
+ self.single_comment_re.search, self.appendCommentLine],
+ ["DEFCLASS", "DEFCLASS_BODY",
+ self.double_comment_re.search, self.appendCommentLine],
+
+ # multiline comments
+ ["DEFCLASS", "COMMENT_SINGLE",
+ self.start_single_comment_re.search, self.appendCommentLine],
+ ["COMMENT_SINGLE", "DEFCLASS_BODY",
+ self.end_single_comment_re.search, self.appendCommentLine],
+ ["COMMENT_SINGLE", "COMMENT_SINGLE",
+ self.catchall, self.appendCommentLine],
+ ["DEFCLASS", "COMMENT_DOUBLE",
+ self.start_double_comment_re.search, self.appendCommentLine],
+ ["COMMENT_DOUBLE", "DEFCLASS_BODY",
+ self.end_double_comment_re.search, self.appendCommentLine],
+ ["COMMENT_DOUBLE", "COMMENT_DOUBLE",
+ self.catchall, self.appendCommentLine],
+
+ # other lines
+ ["DEFCLASS", "DEFCLASS", self.empty_re.search, self.appendDefclassLine],
+ ["DEFCLASS", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
+ ["DEFCLASS", "DEFCLASS_MULTI",
+ self.multiline_defclass_start_re.search, self.resetCommentSearch],
+ ["DEFCLASS", "DEFCLASS_BODY", self.catchall, self.stopCommentSearch],
+
+ # DEFCLASS_BODY
+
+ ["DEFCLASS_BODY", "DEFCLASS",
+ self.defclass_re.search, self.startCommentSearch],
+ ["DEFCLASS_BODY", "DEFCLASS_MULTI",
+ self.multiline_defclass_start_re.search, self.startCommentSearch],
+ ["DEFCLASS_BODY", "DEFCLASS_BODY", self.catchall, self.appendNormalLine],
+
+ # DEFCLASS_MULTI
+ ["DEFCLASS_MULTI", "DEFCLASS",
+ self.multiline_defclass_end_re.search, self.appendDefclassLine],
+ ["DEFCLASS_MULTI", "DEFCLASS_MULTI",
+ self.catchall, self.appendDefclassLine],
+ ]
+
+ self.fsm = FSM("FILEHEAD", transitions)
+ self.outstream = sys.stdout
+
+ self.output = []
+ self.comment = []
+ self.filehead = []
+ self.defclass = []
+ self.indent = ""
+
+ def __closeComment(self):
+ """Appends any open comment block and triggering block to the output."""
+
+ if args.autobrief:
+ if len(self.comment) == 1 \
+ or (len(self.comment) > 2 and self.comment[1].strip() == ''):
+ self.comment[0] = self.__docstringSummaryToBrief(
+ self.comment[0])
+
+ if self.comment:
+ block = self.makeCommentBlock()
+ self.output.extend(block)
+
+ if self.defclass:
+ self.output.extend(self.defclass)
+
+ def __docstringSummaryToBrief(self, line):
+ """Adds \\brief to the docstrings summary line.
+
+ A \\brief is prepended, provided no other doxygen command is at the
+ start of the line.
+ """
+ stripped = line.strip()
+ if stripped and not stripped[0] in ('@', '\\'):
+ return "\\brief " + line
+ else:
+ return line
+
+ def __flushBuffer(self):
+ """Flushes the current outputbuffer to the outstream."""
+ if self.output:
+ try:
+ if args.debug:
+ print("# OUTPUT: ", self.output, file=sys.stderr)
+ print("\n".join(self.output), file=self.outstream)
+ self.outstream.flush()
+ except IOError:
+ # Fix for FS#33. Catches "broken pipe" when doxygen closes
+ # stdout prematurely upon usage of INPUT_FILTER, INLINE_SOURCES
+ # and FILTER_SOURCE_FILES.
+ pass
+ self.output = []
+
+ def catchall(self, input):
+ """The catchall-condition, always returns true."""
+ return True
+
+ def resetCommentSearch(self, match):
+ """Restarts a new comment search for a different triggering line.
+
+ Closes the current commentblock and starts a new comment search.
+ """
+ if args.debug:
+ print("# CALLBACK: resetCommentSearch", file=sys.stderr)
+ self.__closeComment()
+ self.startCommentSearch(match)
+
+ def startCommentSearch(self, match):
+ """Starts a new comment search.
+
+ Saves the triggering line, resets the current comment and saves
+ the current indentation.
+ """
+ if args.debug:
+ print("# CALLBACK: startCommentSearch", file=sys.stderr)
+ self.defclass = [self.fsm.current_input]
+ self.comment = []
+ self.indent = match.group(1)
+
+ def stopCommentSearch(self, match):
+ """Stops a comment search.
+
+ Closes the current commentblock, resets the triggering line and
+ appends the current line to the output.
+ """
+ if args.debug:
+ print("# CALLBACK: stopCommentSearch", file=sys.stderr)
+ self.__closeComment()
+
+ self.defclass = []
+ self.output.append(self.fsm.current_input)
+
+ def appendFileheadLine(self, match):
+ """Appends a line in the FILEHEAD state.
+
+ Closes the open comment block, resets it and appends the current line.
+ """
+ if args.debug:
+ print("# CALLBACK: appendFileheadLine", file=sys.stderr)
+ self.__closeComment()
+ self.comment = []
+ self.output.append(self.fsm.current_input)
+
+ def appendCommentLine(self, match):
+ """Appends a comment line.
+
+ The comment delimiter is removed from multiline start and ends as
+ well as singleline comments.
+ """
+ if args.debug:
+ print("# CALLBACK: appendCommentLine", file=sys.stderr)
+ (from_state, to_state, condition, callback) = self.fsm.current_transition
+
+ # single line comment
+ if (from_state == "DEFCLASS" and to_state == "DEFCLASS_BODY") \
+ or (from_state == "FILEHEAD" and to_state == "FILEHEAD"):
+ # remove comment delimiter from begin and end of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[line.find(
+ activeCommentDelim) + len(activeCommentDelim):line.rfind(activeCommentDelim)])
+
+ if (to_state == "DEFCLASS_BODY"):
+ self.__closeComment()
+ self.defclass = []
+ # multiline start
+ elif from_state == "DEFCLASS" or from_state == "FILEHEAD":
+ # remove comment delimiter from begin of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(
+ line[line.find(activeCommentDelim) + len(activeCommentDelim):])
+ # multiline end
+ elif to_state == "DEFCLASS_BODY" or to_state == "FILEHEAD":
+ # remove comment delimiter from end of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[0:line.rfind(activeCommentDelim)])
+ if (to_state == "DEFCLASS_BODY"):
+ self.__closeComment()
+ self.defclass = []
+ # in multiline comment
+ else:
+ # just append the comment line
+ self.comment.append(self.fsm.current_input)
+
+ def appendNormalLine(self, match):
+ """Appends a line to the output."""
+ if args.debug:
+ print("# CALLBACK: appendNormalLine", file=sys.stderr)
+ self.output.append(self.fsm.current_input)
+
+ def appendDefclassLine(self, match):
+ """Appends a line to the triggering block."""
+ if args.debug:
+ print("# CALLBACK: appendDefclassLine", file=sys.stderr)
+ self.defclass.append(self.fsm.current_input)
+
+ def makeCommentBlock(self):
+ """Indents the current comment block with respect to the current
+ indentation level.
+
+ @returns a list of indented comment lines
+ """
+ doxyStart = "##"
+ commentLines = self.comment
+
+ commentLines = ["%s# %s" % (self.indent, x) for x in commentLines]
+ l = [self.indent + doxyStart]
+ l.extend(commentLines)
+
+ return l
+
+ def parse(self, input):
+ """Parses a python file given as input string and returns the doxygen-
+ compatible representation.
+
+ @param input the python code to parse
+ @returns the modified python code
+ """
+ lines = input.split("\n")
+
+ for line in lines:
+ self.fsm.makeTransition(line)
+
+ if self.fsm.current_state == "DEFCLASS":
+ self.__closeComment()
+
+ return "\n".join(self.output)
+
+ def parseFile(self, filename):
+ """Parses a python file given as input string and returns the doxygen-
+ compatible representation.
+
+ @param input the python code to parse
+ @returns the modified python code
+ """
+ f = open(filename, 'r')
+
+ for line in f:
+ self.parseLine(line.rstrip('\r\n'))
+ if self.fsm.current_state == "DEFCLASS":
+ self.__closeComment()
+ self.__flushBuffer()
+ f.close()
+
+ def parseLine(self, line):
+ """Parse one line of python and flush the resulting output to the
+ outstream.
+
+ @param line the python code line to parse
+ """
+ self.fsm.makeTransition(line)
+ self.__flushBuffer()
+
+
+def argParse():
+ """Parses commandline args."""
+ parser = ArgumentParser(prog=__applicationName__)
+
+ parser.add_argument("--version", action="version",
+ version="%(prog)s " + __version__
+ )
+ parser.add_argument("--autobrief", action="store_true",
+ help="use the docstring summary line as \\brief description"
+ )
+ parser.add_argument("--debug", action="store_true",
+ help="enable debug output on stderr"
+ )
+ parser.add_argument("filename", metavar="FILENAME")
+
+ return parser.parse_args()
+
+
+def main():
+ """Starts the parser on the file given by the filename as the first
+ argument on the commandline.
+ """
+ global args
+ args = argParse()
+ fsm = Doxypy()
+ fsm.parseFile(args.filename)
+
+
+if __name__ == "__main__":
+ main()
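
The net effect of the FSM above: a def/class line starts a comment search, the docstring that follows is buffered, and makeCommentBlock() re-emits it as a '##'-prefixed Doxygen comment placed before the stored definition line. A small standalone sketch of just that formatting step (the inputs are made up for illustration):

    # Mirrors makeCommentBlock() above: indent the buffered docstring lines
    # and prefix the block with the '##' Doxygen marker.
    def make_comment_block(indent, comment_lines):
        doxy_start = '##'
        block = [indent + doxy_start]
        block += ['%s# %s' % (indent, line) for line in comment_lines]
        return block

    docstring = ['Multiply the input by a constant.', '', '@param k the constant']
    print('\n'.join(make_comment_block('    ', docstring) + ['    def work(self):']))
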
diff --git a/docs/doxygen/other/group_defs.dox b/docs/doxygen/other/group_defs.dox
index 340318f..94aff62 100644
--- a/docs/doxygen/other/group_defs.dox
+++ b/docs/doxygen/other/group_defs.dox
@@ -4,4 +4,3 @@
* module are listed here or in the subcategories below.
*
*/
-
diff --git a/docs/doxygen/pydoc_macros.h b/docs/doxygen/pydoc_macros.h
new file mode 100644
index 0000000..fb3954b
--- /dev/null
+++ b/docs/doxygen/pydoc_macros.h
@@ -0,0 +1,19 @@
+#ifndef PYDOC_MACROS_H
+#define PYDOC_MACROS_H
+
+#define __EXPAND(x) x
+#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
+#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
+#define __CAT1(a, b) a##b
+#define __CAT2(a, b) __CAT1(a, b)
+#define __DOC1(n1) __doc_##n1
+#define __DOC2(n1, n2) __doc_##n1##_##n2
+#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
+#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
+#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
+#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
+#define __DOC7(n1, n2, n3, n4, n5, n6, n7) \
+ __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
+#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
+
+#endif // PYDOC_MACROS_H
diff --git a/docs/doxygen/swig_doc.pyc b/docs/doxygen/swig_doc.pyc
deleted file mode 100644
index 6c03427..0000000
Binary files a/docs/doxygen/swig_doc.pyc and /dev/null differ
diff --git a/docs/doxygen/swig_doc.py b/docs/doxygen/update_pydoc.py
similarity index 51%
rename from docs/doxygen/swig_doc.py
rename to docs/doxygen/update_pydoc.py
index 6b74c01..b65e168 100644
--- a/docs/doxygen/swig_doc.py
+++ b/docs/doxygen/update_pydoc.py
@@ -1,42 +1,38 @@
#
# Copyright 2010-2012 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gnuradio
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
#
"""
-Creates the swig_doc.i SWIG interface file.
-Execute using: python swig_doc.py xml_path outputfilename
+Updates the *pydoc_h files for a module
+Execute using: python update_pydoc.py xml_path outputfilename
-The file instructs SWIG to transfer the doxygen comments into the
+The file instructs Pybind11 to transfer the doxygen comments into the
python docstrings.
"""
-from __future__ import unicode_literals
-import sys, time
+import os
+import sys
+import time
+import glob
+import re
+import json
+from argparse import ArgumentParser
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
from doxyxml import DoxyOther, base
+
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
+
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
@@ -61,6 +57,7 @@ def includes(cls, item):
is_a_block = di.has_member(friendname, DoxyFunction)
return is_a_block
+
class Block2(object):
"""
Checks if doxyxml produced objects correspond to a new style
@@ -74,7 +71,8 @@ def includes(cls, item):
# Check for a parsing error.
if item.error():
return False
- is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther)
+ is_a_block2 = item.has_member(
+ 'make', DoxyFunction) and item.has_member('sptr', DoxyOther)
return is_a_block2
@@ -86,6 +84,7 @@ def utoascii(text):
return ''
out = text.encode('ascii', 'replace')
# swig will require us to replace blackslash with 4 backslashes
+ # TODO: evaluate what this should be for pybind11
out = out.replace(b'\\', b'\\\\\\\\')
out = out.replace(b'"', b'\\"').decode('ascii')
return str(out)
@@ -104,6 +103,7 @@ def combine_descriptions(obj):
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
+
def format_params(parameteritems):
output = ['Args:']
template = ' {0} : {1}'
@@ -111,10 +111,13 @@ def format_params(parameteritems):
output.append(template.format(pi.name, pi.description))
return '\n'.join(output)
+
entry_templ = '%feature("docstring") {name} "{docstring}"'
+
+
def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
"""
- Create a docstring entry for a swig interface file.
+ Create a docstring key/value pair, where the key is the object name.
obj - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to obj.name())
@@ -124,7 +127,9 @@ def make_entry(obj, name=None, templ="{description}", description=None, params=[
used as the description instead of extracting it from obj.
"""
if name is None:
- name=obj.name()
+ name = obj.name()
+ if hasattr(obj, '_parse_data') and hasattr(obj._parse_data, 'definition'):
+ name = obj._parse_data.definition.split(' ')[-1]
if "operator " in name:
return ''
if description is None:
@@ -133,56 +138,28 @@ def make_entry(obj, name=None, templ="{description}", description=None, params=[
description += '\n\n'
description += utoascii(format_params(params))
docstring = templ.format(description=description)
- if not docstring:
- return ''
- return entry_templ.format(
- name=name,
- docstring=docstring,
- )
-
-
-def make_func_entry(func, name=None, description=None, params=None):
- """
- Create a function docstring entry for a swig interface file.
- func - a doxyxml object from which documentation will be extracted.
- name - the name of the C object (defaults to func.name())
- description - if this optional variable is set then it's value is
- used as the description instead of extracting it from func.
- params - a parameter list that overrides using func.params.
- """
- #if params is None:
- # params = func.params
- #params = [prm.declname for prm in params]
- #if params:
- # sig = "Params: (%s)" % ", ".join(params)
- #else:
- # sig = "Params: (NONE)"
- #templ = "{description}\n\n" + sig
- #return make_entry(func, name=name, templ=utoascii(templ),
- # description=description)
- return make_entry(func, name=name, description=description, params=params)
+ return {name: docstring}
def make_class_entry(klass, description=None, ignored_methods=[], params=None):
"""
- Create a class docstring for a swig interface file.
+ Create a class docstring key/value pair.
"""
if params is None:
params = klass.params
- output = []
- output.append(make_entry(klass, description=description, params=params))
+ output = {}
+ output.update(make_entry(klass, description=description, params=params))
for func in klass.in_category(DoxyFunction):
if func.name() not in ignored_methods:
name = klass.name() + '::' + func.name()
- output.append(make_func_entry(func, name=name))
- return "\n\n".join(output)
+ output.update(make_entry(func, name=name))
+ return output
def make_block_entry(di, block):
"""
- Create class and function docstrings of a gnuradio block for a
- swig interface file.
+ Create class and function docstrings of a gnuradio block
"""
descriptions = []
# Get the documentation associated with the class.
@@ -207,48 +184,42 @@ def make_block_entry(di, block):
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
- output = []
- output.append(make_class_entry(block, description=super_description))
- output.append(make_func_entry(make_func, description=super_description,
- params=block.params))
- return "\n\n".join(output)
+ output = {}
+ output.update(make_class_entry(block, description=super_description))
+ output.update(make_entry(make_func, description=super_description,
+ params=block.params))
+ return output
+
def make_block2_entry(di, block):
"""
- Create class and function docstrings of a new style gnuradio block for a
- swig interface file.
+ Create class and function docstrings of a new style gnuradio block
"""
- descriptions = []
# For new style blocks all the relevant documentation should be
# associated with the 'make' method.
class_description = combine_descriptions(block)
make_func = block.get_member('make', DoxyFunction)
make_description = combine_descriptions(make_func)
- description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description
+ description = class_description + \
+ "\n\nConstructor Specific Documentation:\n\n" + make_description
# Associate the combined description with the class and
# the make function.
- output = []
- output.append(make_class_entry(
- block, description=description,
- ignored_methods=['make'], params=make_func.params))
+ output = {}
+ output.update(make_class_entry(
+ block, description=description,
+ ignored_methods=['make'], params=make_func.params))
makename = block.name() + '::make'
- output.append(make_func_entry(
- make_func, name=makename, description=description,
- params=make_func.params))
- return "\n\n".join(output)
+ output.update(make_entry(
+ make_func, name=makename, description=description,
+ params=make_func.params))
+ return output
-def make_swig_interface_file(di, swigdocfilename, custom_output=None):
- output = ["""
-/*
- * This file was automatically generated using swig_doc.py.
- *
- * Any changes to it will be lost next time it is regenerated.
- */
-"""]
+def get_docstrings_dict(di, custom_output=None):
- if custom_output is not None:
- output.append(custom_output)
+ output = {}
+ if custom_output:
+ output.update(custom_output)
# Create docstrings for the blocks.
blocks = di.in_category(Block)
@@ -261,21 +232,23 @@ def make_swig_interface_file(di, swigdocfilename, custom_output=None):
# Don't want to risk writing to output twice.
if make_func.name() not in make_funcs:
make_funcs.add(make_func.name())
- output.append(make_block_entry(di, block))
+ output.update(make_block_entry(di, block))
except block.ParsingError:
- sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
+ sys.stderr.write(
+ 'Parsing error for block {0}\n'.format(block.name()))
raise
for block in blocks2:
try:
make_func = block.get_member('make', DoxyFunction)
- make_func_name = block.name() +'::make'
+ make_func_name = block.name() + '::make'
# Don't want to risk writing to output twice.
if make_func_name not in make_funcs:
make_funcs.add(make_func_name)
- output.append(make_block2_entry(di, block))
+ output.update(make_block2_entry(di, block))
except block.ParsingError:
- sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
+ sys.stderr.write(
+ 'Parsing error for block {0}\n'.format(block.name()))
raise
# Create docstrings for functions
@@ -284,9 +257,10 @@ def make_swig_interface_file(di, swigdocfilename, custom_output=None):
if f.name() not in make_funcs and not f.name().startswith('std::')]
for f in funcs:
try:
- output.append(make_func_entry(f))
+ output.update(make_entry(f))
except f.ParsingError:
- sys.stderr.write('Parsing error for function {0}\n'.format(f.name()))
+ sys.stderr.write(
+ 'Parsing error for function {0}\n'.format(f.name()))
# Create docstrings for classes
block_names = [block.name() for block in blocks]
@@ -295,37 +269,104 @@ def make_swig_interface_file(di, swigdocfilename, custom_output=None):
if k.name() not in block_names and not k.name().startswith('std::')]
for k in klasses:
try:
- output.append(make_class_entry(k))
+ output.update(make_class_entry(k))
except k.ParsingError:
sys.stderr.write('Parsing error for class {0}\n'.format(k.name()))
# Docstrings are not created for anything that is not a function or a class.
# If this excludes anything important please add it here.
- output = "\n\n".join(output)
+ return output
+
+
+def sub_docstring_in_pydoc_h(pydoc_files, docstrings_dict, output_dir, filter_str=None):
+ if filter_str:
+ docstrings_dict = {
+ k: v for k, v in docstrings_dict.items() if k.startswith(filter_str)}
+
+ with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file:
+
+ for pydoc_file in pydoc_files:
+ if filter_str:
+ filter_str2 = "::".join((filter_str, os.path.split(
+ pydoc_file)[-1].split('_pydoc_template.h')[0]))
+ docstrings_dict2 = {
+ k: v for k, v in docstrings_dict.items() if k.startswith(filter_str2)}
+ else:
+ docstrings_dict2 = docstrings_dict
+
+ file_in = open(pydoc_file, 'r').read()
+ for key, value in docstrings_dict2.items():
+ file_in_tmp = file_in
+ try:
+ doc_key = key.split("::")
+ # if 'gr' in doc_key:
+ # doc_key.remove('gr')
+ doc_key = '_'.join(doc_key)
+ regexp = r'(__doc_{} =\sR\"doc\()[^)]*(\)doc\")'.format(
+ doc_key)
+ regexp = re.compile(regexp, re.MULTILINE)
+
+ (file_in, nsubs) = regexp.subn(
+ r'\1' + value + r'\2', file_in, count=1)
+ if nsubs == 1:
+ status_file.write("PASS: " + pydoc_file + "\n")
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt
+ except: # be permissive, TODO log, but just leave the docstring blank
+ status_file.write("FAIL: " + pydoc_file + "\n")
+ file_in = file_in_tmp
+
+ output_pathname = os.path.join(output_dir, os.path.basename(
+ pydoc_file).replace('_template.h', '.h'))
+ with open(output_pathname, 'w') as file_out:
+ file_out.write(file_in)
+
+
+def copy_docstring_templates(pydoc_files, output_dir):
+ with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file:
+ for pydoc_file in pydoc_files:
+ file_in = open(pydoc_file, 'r').read()
+ output_pathname = os.path.join(output_dir, os.path.basename(
+ pydoc_file).replace('_template.h', '.h'))
+ with open(output_pathname, 'w') as file_out:
+ file_out.write(file_in)
+ status_file.write("DONE")
+
+
+def argParse():
+ """Parses commandline args."""
+ desc = 'Scrape the doxygen generated xml for docstrings to insert into python bindings'
+ parser = ArgumentParser(description=desc)
+
+ parser.add_argument("function", help="Operation to perform on docstrings", choices=[
+ "scrape", "sub", "copy"])
+
+ parser.add_argument("--xml_path")
+ parser.add_argument("--bindings_dir")
+ parser.add_argument("--output_dir")
+ parser.add_argument("--json_path")
+ parser.add_argument("--filter", default=None)
+
+ return parser.parse_args()
- swig_doc = open(swigdocfilename, 'w')
- swig_doc.write(output)
- swig_doc.close()
if __name__ == "__main__":
# Parse command line options and set up doxyxml.
- err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
- if len(sys.argv) != 3:
- raise Exception(err_msg)
- xml_path = sys.argv[1]
- swigdocfilename = sys.argv[2]
- di = DoxyIndex(xml_path)
-
- # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined!
- # This is presumably a bug in SWIG.
- #msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
- #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
- #delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
- output = []
- #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
- #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
- custom_output = "\n\n".join(output)
-
- # Generate the docstrings interface file.
- make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
+ args = argParse()
+ if args.function.lower() == 'scrape':
+ di = DoxyIndex(args.xml_path)
+ docstrings_dict = get_docstrings_dict(di)
+ with open(args.json_path, 'w') as fp:
+ json.dump(docstrings_dict, fp)
+ elif args.function.lower() == 'sub':
+ with open(args.json_path, 'r') as fp:
+ docstrings_dict = json.load(fp)
+ pydoc_files = glob.glob(os.path.join(
+ args.bindings_dir, '*_pydoc_template.h'))
+ sub_docstring_in_pydoc_h(
+ pydoc_files, docstrings_dict, args.output_dir, args.filter)
+ elif args.function.lower() == 'copy':
+ pydoc_files = glob.glob(os.path.join(
+ args.bindings_dir, '*_pydoc_template.h'))
+ copy_docstring_templates(pydoc_files, args.output_dir)
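
Workflow summary for the rewritten script: 'scrape' walks the doxygen XML and dumps a name-to-docstring JSON dictionary, 'sub' splices those strings into the *_pydoc_template.h files produced by the pybind11 bindings, and 'copy' passes the templates through untouched. The substitution itself is the regex in sub_docstring_in_pydoc_h(); a self-contained sketch of that one step (the key and template line below are invented for illustration, not taken from a real *_pydoc_template.h):

    import re

    # Hypothetical scraped entry and pydoc template line.
    docstrings = {'gr::verilog::verilog_axi_ff::make': 'Create an AXI float block.'}
    template = 'static const char *__doc_gr_verilog_verilog_axi_ff_make = R"doc()doc";'

    for key, value in docstrings.items():
        doc_key = '_'.join(key.split('::'))               # C++ scope -> pydoc key
        regexp = re.compile(r'(__doc_{} =\sR\"doc\()[^)]*(\)doc\")'.format(doc_key))
        template, nsubs = regexp.subn(r'\1' + value + r'\2', template, count=1)

    print(template)   # the empty R"doc()doc" body now carries the scraped docstring
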
diff --git a/grc/CMakeLists.txt b/grc/CMakeLists.txt
index f011b98..62fde76 100644
--- a/grc/CMakeLists.txt
+++ b/grc/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
install(FILES
grverilog_verilog_axi_xx.block.yml DESTINATION share/gnuradio/grc/blocks
diff --git a/grc/grverilog_verilog_axi_xx.block.yml b/grc/grverilog_verilog_axi_xx.block.yml
index b53c389..af8702e 100644
--- a/grc/grverilog_verilog_axi_xx.block.yml
+++ b/grc/grverilog_verilog_axi_xx.block.yml
@@ -3,7 +3,7 @@ label: Verilog_AXI
category: '[GrVerilog]'
templates:
- imports: import verilog
+ imports: from gnuradio import verilog
make: verilog.verilog_axi_${type.fcn}(${file}, ${overwrite}, ${IO_ratio}, ${verilator_options}, ${module_flag}, ${skip_output_items})
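
With the Python package now installed under the gnuradio namespace, GRC-generated flowgraphs import the block via 'from gnuradio import verilog' instead of a top-level 'import verilog'. A hedged sketch of the resulting usage, following the parameter order of the make: template above (the float variant is only an example and every argument value is a placeholder; argument types are assumptions, not taken from the patch):

    from gnuradio import gr, blocks, verilog

    tb = gr.top_block()
    src = blocks.null_source(gr.sizeof_float)
    snk = blocks.null_sink(gr.sizeof_float)
    axi = verilog.verilog_axi_ff(
        'adder.v',   # file: top-level Verilog source (placeholder path)
        False,       # overwrite
        1.0,         # IO_ratio
        '',          # verilator_options
        0,           # module_flag
        0)           # skip_output_items
    tb.connect(src, axi, snk)
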
diff --git a/include/verilog/CMakeLists.txt b/include/gnuradio/verilog/CMakeLists.txt
similarity index 100%
rename from include/verilog/CMakeLists.txt
rename to include/gnuradio/verilog/CMakeLists.txt
diff --git a/include/verilog/Shared_lib.h b/include/gnuradio/verilog/Shared_lib.h
similarity index 100%
rename from include/verilog/Shared_lib.h
rename to include/gnuradio/verilog/Shared_lib.h
diff --git a/include/verilog/Shell_cmd.h b/include/gnuradio/verilog/Shell_cmd.h
similarity index 100%
rename from include/verilog/Shell_cmd.h
rename to include/gnuradio/verilog/Shell_cmd.h
diff --git a/include/verilog/api.h b/include/gnuradio/verilog/api.h
similarity index 100%
rename from include/verilog/api.h
rename to include/gnuradio/verilog/api.h
diff --git a/include/verilog/constants.h b/include/gnuradio/verilog/constants.h
similarity index 97%
rename from include/verilog/constants.h
rename to include/gnuradio/verilog/constants.h
index 8989101..0082854 100644
--- a/include/verilog/constants.h
+++ b/include/gnuradio/verilog/constants.h
@@ -22,7 +22,7 @@
#ifndef INCLUDED_GR_VERILOG_CONSTANTS_H
#define INCLUDED_GR_VERILOG_CONSTANTS_H
-#include
+#include
#include
namespace gr {
diff --git a/include/verilog/gr_verilog_iotype.h b/include/gnuradio/verilog/gr_verilog_iotype.h
similarity index 100%
rename from include/verilog/gr_verilog_iotype.h
rename to include/gnuradio/verilog/gr_verilog_iotype.h
diff --git a/include/verilog/verilog_axi_bb.h b/include/gnuradio/verilog/verilog_axi_bb.h
similarity index 95%
rename from include/verilog/verilog_axi_bb.h
rename to include/gnuradio/verilog/verilog_axi_bb.h
index 22d9203..e325409 100644
--- a/include/verilog/verilog_axi_bb.h
+++ b/include/gnuradio/verilog/verilog_axi_bb.h
@@ -22,7 +22,7 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_BB_H
#define INCLUDED_VERILOG_VERILOG_AXI_BB_H
-#include
+#include
#include
namespace gr {
@@ -36,7 +36,7 @@ namespace gr {
class VERILOG_API verilog_axi_bb : virtual public gr::block
{
public:
- typedef boost::shared_ptr<verilog_axi_bb> sptr;
+ typedef std::shared_ptr<verilog_axi_bb> sptr;
/*!
* \brief Return a shared_ptr to a new instance of verilog::verilog_axi_bb.
diff --git a/include/verilog/verilog_axi_cc.h b/include/gnuradio/verilog/verilog_axi_cc.h
similarity index 95%
rename from include/verilog/verilog_axi_cc.h
rename to include/gnuradio/verilog/verilog_axi_cc.h
index ef26fd5..81930ec 100644
--- a/include/verilog/verilog_axi_cc.h
+++ b/include/gnuradio/verilog/verilog_axi_cc.h
@@ -22,7 +22,7 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_CC_H
#define INCLUDED_VERILOG_VERILOG_AXI_CC_H
-#include
+#include
#include
namespace gr {
@@ -36,7 +36,7 @@ namespace gr {
class VERILOG_API verilog_axi_cc : virtual public gr::block
{
public:
- typedef boost::shared_ptr<verilog_axi_cc> sptr;
+ typedef std::shared_ptr<verilog_axi_cc> sptr;
/*!
* \brief Return a shared_ptr to a new instance of verilog::verilog_axi_cc.
diff --git a/include/verilog/verilog_axi_ff.h b/include/gnuradio/verilog/verilog_axi_ff.h
similarity index 95%
rename from include/verilog/verilog_axi_ff.h
rename to include/gnuradio/verilog/verilog_axi_ff.h
index 275a695..8baaee0 100644
--- a/include/verilog/verilog_axi_ff.h
+++ b/include/gnuradio/verilog/verilog_axi_ff.h
@@ -22,7 +22,7 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_FF_H
#define INCLUDED_VERILOG_VERILOG_AXI_FF_H
-#include
+#include
#include
namespace gr {
@@ -36,7 +36,7 @@ namespace gr {
class VERILOG_API verilog_axi_ff : virtual public gr::block
{
public:
- typedef boost::shared_ptr<verilog_axi_ff> sptr;
+ typedef std::shared_ptr<verilog_axi_ff> sptr;
/*!
* \brief Return a shared_ptr to a new instance of verilog::verilog_axi_ff.
diff --git a/include/verilog/verilog_axi_ii.h b/include/gnuradio/verilog/verilog_axi_ii.h
similarity index 95%
rename from include/verilog/verilog_axi_ii.h
rename to include/gnuradio/verilog/verilog_axi_ii.h
index 68d0998..7b79953 100644
--- a/include/verilog/verilog_axi_ii.h
+++ b/include/gnuradio/verilog/verilog_axi_ii.h
@@ -22,7 +22,7 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_II_H
#define INCLUDED_VERILOG_VERILOG_AXI_II_H
-#include
+#include
#include
namespace gr {
@@ -36,7 +36,7 @@ namespace gr {
class VERILOG_API verilog_axi_ii : virtual public gr::block
{
public:
- typedef boost::shared_ptr<verilog_axi_ii> sptr;
+ typedef std::shared_ptr<verilog_axi_ii> sptr;
/*!
* \brief Return a shared_ptr to a new instance of verilog::verilog_axi_ii.
diff --git a/include/verilog/verilog_axi_ss.h b/include/gnuradio/verilog/verilog_axi_ss.h
similarity index 95%
rename from include/verilog/verilog_axi_ss.h
rename to include/gnuradio/verilog/verilog_axi_ss.h
index 7e259f2..0b82b8c 100644
--- a/include/verilog/verilog_axi_ss.h
+++ b/include/gnuradio/verilog/verilog_axi_ss.h
@@ -22,7 +22,7 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_SS_H
#define INCLUDED_VERILOG_VERILOG_AXI_SS_H
-#include
+#include
#include
namespace gr {
@@ -36,7 +36,7 @@ namespace gr {
class VERILOG_API verilog_axi_ss : virtual public gr::block
{
public:
- typedef boost::shared_ptr<verilog_axi_ss> sptr;
+ typedef std::shared_ptr<verilog_axi_ss> sptr;
/*!
* \brief Return a shared_ptr to a new instance of verilog::verilog_axi_ss.
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index 4893d81..ea451b2 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -1,30 +1,16 @@
# Copyright 2011,2012,2016,2018,2019 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Setup library
########################################################################
include(GrPlatform) #define LIB_SUFFIX
-include_directories(${Boost_INCLUDE_DIR})
-link_directories(${Boost_LIBRARY_DIRS})
-
list(APPEND verilog_sources
${CMAKE_CURRENT_BINARY_DIR}/constants.cc
verilog_axi_ii_impl.cc
@@ -44,7 +30,7 @@ if(NOT verilog_sources)
endif(NOT verilog_sources)
add_library(gnuradio-verilog SHARED ${verilog_sources})
-target_link_libraries(gnuradio-verilog ${Boost_LIBRARIES} gnuradio::gnuradio-runtime)
+target_link_libraries(gnuradio-verilog gnuradio::gnuradio-runtime)
target_include_directories(gnuradio-verilog
PUBLIC $
PUBLIC $
@@ -69,30 +55,6 @@ GR_LIBRARY_FOO(gnuradio-verilog RUNTIME_COMPONENT "verilog_runtime" DEVEL_COMPON
message(STATUS "Using install prefix: ${CMAKE_INSTALL_PREFIX}")
message(STATUS "Building for version: ${VERSION} / ${LIBVER}")
-########################################################################
-# Build and register unit test
-########################################################################
-include(GrTest)
-
-include_directories(${CPPUNIT_INCLUDE_DIRS})
-
-list(APPEND test_verilog_sources
- ${CMAKE_CURRENT_SOURCE_DIR}/test_verilog.cc
- ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog.cc
-)
-
-add_executable(test-verilog ${test_verilog_sources})
-
-target_link_libraries(
- test-verilog
- ${GNURADIO_RUNTIME_LIBRARIES}
- ${Boost_LIBRARIES}
- ${CPPUNIT_LIBRARIES}
- gnuradio-verilog
-)
-
-GR_ADD_TEST(test_verilog test-verilog)
-
########################################################################
# Configure templates
diff --git a/lib/Shared_lib.cpp b/lib/Shared_lib.cpp
index dd64e27..6c55e7a 100644
--- a/lib/Shared_lib.cpp
+++ b/lib/Shared_lib.cpp
@@ -22,7 +22,7 @@
#include
#include
#include
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define SLASH '/'
#define _EXIT_FAILURE -1
diff --git a/lib/Shell_cmd.cpp b/lib/Shell_cmd.cpp
index 28aa3e9..27f5b4f 100644
--- a/lib/Shell_cmd.cpp
+++ b/lib/Shell_cmd.cpp
@@ -23,7 +23,7 @@
#include
#include
#include
-#include "verilog/Shell_cmd.h"
+#include "gnuradio/verilog/Shell_cmd.h"
#define BUFFER_SIZE 1024
#define _EXIT_FAILURE -1
diff --git a/lib/constants.cc.in b/lib/constants.cc.in
index 98d8374..3af46fa 100644
--- a/lib/constants.cc.in
+++ b/lib/constants.cc.in
@@ -18,7 +18,7 @@
* Boston, MA 02110-1301, USA.
*/
-#include "verilog/constants.h"
+#include "gnuradio/verilog/constants.h"
#include
namespace gr {
diff --git a/lib/qa_verilog.cc b/lib/qa_verilog.cc
deleted file mode 100644
index d86c3de..0000000
--- a/lib/qa_verilog.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2012 Free Software Foundation, Inc.
- *
- * This file is part of GNU Radio
- *
- * GNU Radio is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3, or (at your option)
- * any later version.
- *
- * GNU Radio is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU Radio; see the file COPYING. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street,
- * Boston, MA 02110-1301, USA.
- */
-
-/*
- * This class gathers together all the test cases for the gr-filter
- * directory into a single test suite. As you create new test cases,
- * add them here.
- */
-
-#include "qa_verilog.h"
-
-CppUnit::TestSuite *
-qa_verilog::suite()
-{
- CppUnit::TestSuite *s = new CppUnit::TestSuite("verilog");
-
- return s;
-}
diff --git a/lib/qa_verilog.h b/lib/qa_verilog.h
deleted file mode 100644
index ce27264..0000000
--- a/lib/qa_verilog.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* -*- c++ -*- */
-/*
- * Copyright 2012 Free Software Foundation, Inc.
- *
- * This file is part of GNU Radio
- *
- * GNU Radio is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3, or (at your option)
- * any later version.
- *
- * GNU Radio is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU Radio; see the file COPYING. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street,
- * Boston, MA 02110-1301, USA.
- */
-
-#ifndef _QA_VERILOG_H_
-#define _QA_VERILOG_H_
-
-#include
-#include
-
-//! collect all the tests for the gr-filter directory
-
-class __GR_ATTR_EXPORT qa_verilog
-{
- public:
- //! return suite of tests for all of gr-filter directory
- static CppUnit::TestSuite *suite();
-};
-
-#endif /* _QA_VERILOG_H_ */
diff --git a/lib/test_verilog.cc b/lib/test_verilog.cc
deleted file mode 100644
index 638bda9..0000000
--- a/lib/test_verilog.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/* -*- c++ -*- */
-/*
- * Copyright 2012 Free Software Foundation, Inc.
- *
- * This file is part of GNU Radio
- *
- * GNU Radio is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3, or (at your option)
- * any later version.
- *
- * GNU Radio is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU Radio; see the file COPYING. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street,
- * Boston, MA 02110-1301, USA.
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include
-#include
-
-#include
-#include "qa_verilog.h"
-#include
-#include
-
-int
-main (int argc, char **argv)
-{
- CppUnit::TextTestRunner runner;
- std::ofstream xmlfile(get_unittest_path("verilog.xml").c_str());
- CppUnit::XmlOutputter *xmlout = new CppUnit::XmlOutputter(&runner.result(), xmlfile);
-
- runner.addTest(qa_verilog::suite());
- runner.setOutputter(xmlout);
-
- bool was_successful = runner.run("", false);
-
- return was_successful ? 0 : 1;
-}
diff --git a/lib/verilog_axi_bb_impl.cc b/lib/verilog_axi_bb_impl.cc
index e3e6a98..fdd2a48 100644
--- a/lib/verilog_axi_bb_impl.cc
+++ b/lib/verilog_axi_bb_impl.cc
@@ -30,10 +30,10 @@
#include
-#include "verilog/constants.h"
+#include "gnuradio/verilog/constants.h"
-#include "verilog/Shell_cmd.h"
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shell_cmd.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk"
#define CPP_TEMPLATE_NAME "axi_module.cpp"
@@ -143,7 +143,8 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->generate_so();
- } catch (std::runtime_error) {
+
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -165,7 +166,8 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->load_lib();
- } catch (std::runtime_error) {
+
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -242,8 +244,8 @@ namespace gr {
// Do <+signal processing+>
- unsigned int input_i;
- unsigned int output_i;
+ int input_i;
+ int output_i;
for (input_i = 0, output_i = 0; output_i < noutput_items && input_i < ninput_items[0];)
{
unsigned char status_code;
@@ -251,7 +253,7 @@ namespace gr {
try {
status_code =
this->sim(in[input_i], out[output_i], this->main_time);
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -355,7 +357,7 @@ namespace gr {
cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
cmd += std::string(" ") + " M_DIR=" + M_dir;
// cmd += verilator_options:
- cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+ cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";
cmd += ENTER;
cmd += ENTER;
@@ -399,11 +401,12 @@ namespace gr {
}
bool
- verilog_axi_bb_impl::test_access(const char *filepath, const char *err_msg = "")
+ verilog_axi_bb_impl::test_access(const char *filepath, const char *err_msg = NULL)
{
if ( access(filepath, R_OK) == _EXIT_FAILURE ) {
- if (err_msg != "") {
+
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% filepath
@@ -418,13 +421,14 @@ namespace gr {
}
bool
- verilog_axi_bb_impl::check_env(const char *package, const char *err_msg = "")
+ verilog_axi_bb_impl::check_env(const char *package, const char *err_msg)
{
Shell_cmd bash;
bash.exec((std::string("which ") + package).c_str());
if (bash.get_msg(0) == "") {
- if (err_msg != "") {
+
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% package
diff --git a/lib/verilog_axi_bb_impl.h b/lib/verilog_axi_bb_impl.h
index 0cda82e..add1b69 100644
--- a/lib/verilog_axi_bb_impl.h
+++ b/lib/verilog_axi_bb_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H
#define INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H
-#include
+#include
#include
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define SLASH "/"
diff --git a/lib/verilog_axi_cc_impl.cc b/lib/verilog_axi_cc_impl.cc
index 009d9b4..e7dc61e 100644
--- a/lib/verilog_axi_cc_impl.cc
+++ b/lib/verilog_axi_cc_impl.cc
@@ -30,10 +30,10 @@
#include
-#include "verilog/constants.h"
+#include "gnuradio/verilog/constants.h"
-#include "verilog/Shell_cmd.h"
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shell_cmd.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk"
#define CPP_TEMPLATE_NAME "axi_module.cpp"
@@ -143,7 +143,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->generate_so();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -165,7 +165,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->load_lib();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -242,8 +242,8 @@ namespace gr {
// Do <+signal processing+>
- unsigned int input_i;
- unsigned int output_i;
+ int input_i;
+ int output_i;
for (input_i = 0, output_i = 0; output_i < noutput_items && input_i < ninput_items[0];)
{
unsigned char status_code;
@@ -251,7 +251,7 @@ namespace gr {
try {
status_code =
this->sim(in[input_i], out[output_i], this->main_time);
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -355,7 +355,7 @@ namespace gr {
cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
cmd += std::string(" ") + " M_DIR=" + M_dir;
// cmd += verilator_options:
- cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+ cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";
cmd += ENTER;
cmd += ENTER;
@@ -399,11 +399,11 @@ namespace gr {
}
bool
- verilog_axi_cc_impl::test_access(const char *filepath, const char *err_msg = "")
+ verilog_axi_cc_impl::test_access(const char *filepath, const char *err_msg)
{
if ( access(filepath, R_OK) == _EXIT_FAILURE ) {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% filepath
@@ -418,13 +418,13 @@ namespace gr {
}
bool
- verilog_axi_cc_impl::check_env(const char *package, const char *err_msg = "")
+ verilog_axi_cc_impl::check_env(const char *package, const char *err_msg)
{
Shell_cmd bash;
bash.exec((std::string("which ") + package).c_str());
if (bash.get_msg(0) == "") {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% package
diff --git a/lib/verilog_axi_cc_impl.h b/lib/verilog_axi_cc_impl.h
index 0de610f..f96496c 100644
--- a/lib/verilog_axi_cc_impl.h
+++ b/lib/verilog_axi_cc_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_CC_IMPL_H
#define INCLUDED_VERILOG_VERILOG_AXI_CC_IMPL_H
-#include
+#include
#include
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define SLASH "/"
diff --git a/lib/verilog_axi_ff_impl.cc b/lib/verilog_axi_ff_impl.cc
index 43c04cc..406b227 100644
--- a/lib/verilog_axi_ff_impl.cc
+++ b/lib/verilog_axi_ff_impl.cc
@@ -30,10 +30,10 @@
#include
-#include "verilog/constants.h"
+#include "gnuradio/verilog/constants.h"
-#include "verilog/Shell_cmd.h"
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shell_cmd.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk"
#define CPP_TEMPLATE_NAME "axi_module.cpp"
@@ -143,7 +143,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->generate_so();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -165,7 +165,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->load_lib();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -242,8 +242,8 @@ namespace gr {
// Do <+signal processing+>
- unsigned int input_i;
- unsigned int output_i;
+ int input_i;
+ int output_i;
for (input_i = 0, output_i = 0; output_i < noutput_items && input_i < ninput_items[0];)
{
unsigned char status_code;
@@ -251,7 +251,7 @@ namespace gr {
try {
status_code =
this->sim(in[input_i], out[output_i], this->main_time);
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -355,7 +355,7 @@ namespace gr {
cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
cmd += std::string(" ") + " M_DIR=" + M_dir;
// cmd += verilator_options:
- cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+ cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";
cmd += ENTER;
cmd += ENTER;
@@ -399,11 +399,11 @@ namespace gr {
}
bool
- verilog_axi_ff_impl::test_access(const char *filepath, const char *err_msg = "")
+ verilog_axi_ff_impl::test_access(const char *filepath, const char *err_msg)
{
if ( access(filepath, R_OK) == _EXIT_FAILURE ) {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% filepath
@@ -418,13 +418,13 @@ namespace gr {
}
bool
- verilog_axi_ff_impl::check_env(const char *package, const char *err_msg = "")
+ verilog_axi_ff_impl::check_env(const char *package, const char *err_msg)
{
Shell_cmd bash;
bash.exec((std::string("which ") + package).c_str());
if (bash.get_msg(0) == "") {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% package
diff --git a/lib/verilog_axi_ff_impl.h b/lib/verilog_axi_ff_impl.h
index 33f8fae..673db90 100644
--- a/lib/verilog_axi_ff_impl.h
+++ b/lib/verilog_axi_ff_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_FF_IMPL_H
#define INCLUDED_VERILOG_VERILOG_AXI_FF_IMPL_H
-#include <verilog/verilog_axi_ff.h>
+#include <gnuradio/verilog/verilog_axi_ff.h>
#include <gnuradio/thread/thread.h>
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define SLASH "/"
diff --git a/lib/verilog_axi_ii_impl.cc b/lib/verilog_axi_ii_impl.cc
index 070b25d..e97aeb4 100644
--- a/lib/verilog_axi_ii_impl.cc
+++ b/lib/verilog_axi_ii_impl.cc
@@ -30,10 +30,10 @@
#include <gnuradio/io_signature.h>
-#include "verilog/constants.h"
+#include "gnuradio/verilog/constants.h"
-#include "verilog/Shell_cmd.h"
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shell_cmd.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk"
#define CPP_TEMPLATE_NAME "axi_module.cpp"
@@ -143,7 +143,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->generate_so();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -165,7 +165,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->load_lib();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -242,8 +242,8 @@ namespace gr {
// Do <+signal processing+>
- unsigned int input_i;
- unsigned int output_i;
+ int input_i;
+ int output_i;
for (input_i = 0, output_i = 0; output_i < noutput_items && input_i < ninput_items[0];)
{
unsigned char status_code;
@@ -251,7 +251,7 @@ namespace gr {
try {
status_code =
this->sim(in[input_i], out[output_i], this->main_time);
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -355,7 +355,7 @@ namespace gr {
cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
cmd += std::string(" ") + " M_DIR=" + M_dir;
// cmd += verilator_options:
- cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+ cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";
cmd += ENTER;
cmd += ENTER;
@@ -399,11 +399,11 @@ namespace gr {
}
bool
- verilog_axi_ii_impl::test_access(const char *filepath, const char *err_msg = "")
+ verilog_axi_ii_impl::test_access(const char *filepath, const char *err_msg)
{
if ( access(filepath, R_OK) == _EXIT_FAILURE ) {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% filepath
@@ -418,13 +418,13 @@ namespace gr {
}
bool
- verilog_axi_ii_impl::check_env(const char *package, const char *err_msg = "")
+ verilog_axi_ii_impl::check_env(const char *package, const char *err_msg)
{
Shell_cmd bash;
bash.exec((std::string("which ") + package).c_str());
if (bash.get_msg(0) == "") {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% package
diff --git a/lib/verilog_axi_ii_impl.h b/lib/verilog_axi_ii_impl.h
index 13e0c4a..bb9091c 100644
--- a/lib/verilog_axi_ii_impl.h
+++ b/lib/verilog_axi_ii_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_II_IMPL_H
#define INCLUDED_VERILOG_VERILOG_AXI_II_IMPL_H
-#include <verilog/verilog_axi_ii.h>
+#include <gnuradio/verilog/verilog_axi_ii.h>
#include <gnuradio/thread/thread.h>
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define SLASH "/"
diff --git a/lib/verilog_axi_ss_impl.cc b/lib/verilog_axi_ss_impl.cc
index 309bf89..7d161b6 100644
--- a/lib/verilog_axi_ss_impl.cc
+++ b/lib/verilog_axi_ss_impl.cc
@@ -30,10 +30,10 @@
#include <gnuradio/io_signature.h>
-#include "verilog/constants.h"
+#include "gnuradio/verilog/constants.h"
-#include "verilog/Shell_cmd.h"
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shell_cmd.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk"
#define CPP_TEMPLATE_NAME "axi_module.cpp"
@@ -143,7 +143,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->generate_so();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -165,7 +165,7 @@ namespace gr {
gr::thread::scoped_lock lock(this->vl_mutex);
this->load_lib();
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -242,8 +242,8 @@ namespace gr {
// Do <+signal processing+>
- unsigned int input_i;
- unsigned int output_i;
+ int input_i;
+ int output_i;
for (input_i = 0, output_i = 0; output_i < noutput_items && input_i < ninput_items[0];)
{
unsigned char status_code;
@@ -251,7 +251,7 @@ namespace gr {
try {
status_code =
this->sim(in[input_i], out[output_i], this->main_time);
- } catch (std::runtime_error) {
+ } catch (std::runtime_error const&) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% this->verilog_module_path.c_str()
@@ -355,7 +355,7 @@ namespace gr {
cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
cmd += std::string(" ") + " M_DIR=" + M_dir;
// cmd += verilator_options:
- cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+ cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";
cmd += ENTER;
cmd += ENTER;
@@ -399,11 +399,11 @@ namespace gr {
}
bool
- verilog_axi_ss_impl::test_access(const char *filepath, const char *err_msg = "")
+ verilog_axi_ss_impl::test_access(const char *filepath, const char *err_msg)
{
if ( access(filepath, R_OK) == _EXIT_FAILURE ) {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% filepath
@@ -418,13 +418,13 @@ namespace gr {
}
bool
- verilog_axi_ss_impl::check_env(const char *package, const char *err_msg = "")
+ verilog_axi_ss_impl::check_env(const char *package, const char *err_msg)
{
Shell_cmd bash;
bash.exec((std::string("which ") + package).c_str());
if (bash.get_msg(0) == "") {
- if (err_msg != "") {
+ if (err_msg != NULL) {
GR_LOG_ERROR(d_logger,
boost::format("%s: %s")
% package
diff --git a/lib/verilog_axi_ss_impl.h b/lib/verilog_axi_ss_impl.h
index 69d7aac..c5dc35c 100644
--- a/lib/verilog_axi_ss_impl.h
+++ b/lib/verilog_axi_ss_impl.h
@@ -21,10 +21,10 @@
#ifndef INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H
#define INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H
-#include <verilog/verilog_axi_ss.h>
+#include <gnuradio/verilog/verilog_axi_ss.h>
#include <gnuradio/thread/thread.h>
-#include "verilog/Shared_lib.h"
+#include "gnuradio/verilog/Shared_lib.h"
#define SLASH "/"
diff --git a/python/__init__.py b/python/__init__.py
deleted file mode 100644
index cdbeadb..0000000
--- a/python/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Copyright 2008,2009 Free Software Foundation, Inc.
-#
-# This application is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# This application is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-
-# The presence of this file turns this directory into a Python package
-
-'''
-This is the GNU Radio VERILOG module. Place your Python package
-description here (python/__init__.py).
-'''
-from __future__ import unicode_literals
-
-# import swig generated symbols into the verilog namespace
-from .verilog_swig import *
-
-# import any pure python here
-#
diff --git a/python/__init__.pyc b/python/__init__.pyc
deleted file mode 100644
index ffbfa26..0000000
Binary files a/python/__init__.pyc and /dev/null differ
diff --git a/python/build_utils.py b/python/build_utils.py
deleted file mode 100644
index cf58a97..0000000
--- a/python/build_utils.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#
-# Copyright 2004,2009,2012 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-#
-
-"""Misc utilities used at build time
-"""
-
-import re, os, os.path
-from build_utils_codes import *
-
-
-# set srcdir to the directory that contains Makefile.am
-try:
- srcdir = os.environ['srcdir']
-except KeyError, e:
- srcdir = "."
-srcdir = srcdir + '/'
-
-# set do_makefile to either true or false dependeing on the environment
-try:
- if os.environ['do_makefile'] == '0':
- do_makefile = False
- else:
- do_makefile = True
-except KeyError, e:
- do_makefile = False
-
-# set do_sources to either true or false dependeing on the environment
-try:
- if os.environ['do_sources'] == '0':
- do_sources = False
- else:
- do_sources = True
-except KeyError, e:
- do_sources = True
-
-name_dict = {}
-
-def log_output_name (name):
- (base, ext) = os.path.splitext (name)
- ext = ext[1:] # drop the leading '.'
-
- entry = name_dict.setdefault (ext, [])
- entry.append (name)
-
-def open_and_log_name (name, dir):
- global do_sources
- if do_sources:
- f = open (name, dir)
- else:
- f = None
- log_output_name (name)
- return f
-
-def expand_template (d, template_filename, extra = ""):
- '''Given a dictionary D and a TEMPLATE_FILENAME, expand template into output file
- '''
- global do_sources
- output_extension = extract_extension (template_filename)
- template = open_src (template_filename, 'r')
- output_name = d['NAME'] + extra + '.' + output_extension
- log_output_name (output_name)
- if do_sources:
- output = open (output_name, 'w')
- do_substitution (d, template, output)
- output.close ()
- template.close ()
-
-def output_glue (dirname):
- output_makefile_fragment ()
- output_ifile_include (dirname)
-
-def output_makefile_fragment ():
- global do_makefile
- if not do_makefile:
- return
-# overwrite the source, which must be writable; this should have been
-# checked for beforehand in the top-level Makefile.gen.gen .
- f = open (os.path.join (os.environ.get('gendir', os.environ.get('srcdir', '.')), 'Makefile.gen'), 'w')
- f.write ('#\n# This file is machine generated. All edits will be overwritten\n#\n')
- output_subfrag (f, 'h')
- output_subfrag (f, 'i')
- output_subfrag (f, 'cc')
- f.close ()
-
-def output_ifile_include (dirname):
- global do_sources
- if do_sources:
- f = open ('%s_generated.i' % (dirname,), 'w')
- f.write ('//\n// This file is machine generated. All edits will be overwritten\n//\n')
- files = name_dict.setdefault ('i', [])
- files.sort ()
- f.write ('%{\n')
- for file in files:
- f.write ('#include <%s>\n' % (file[0:-1] + 'h',))
- f.write ('%}\n\n')
- for file in files:
- f.write ('%%include <%s>\n' % (file,))
-
-def output_subfrag (f, ext):
- files = name_dict.setdefault (ext, [])
- files.sort ()
- f.write ("GENERATED_%s =" % (ext.upper ()))
- for file in files:
- f.write (" \\\n\t%s" % (file,))
- f.write ("\n\n")
-
-def extract_extension (template_name):
- # template name is something like: GrFIRfilterXXX.h.t
- # we return everything between the penultimate . and .t
- mo = re.search (r'\.([a-z]+)\.t$', template_name)
- if not mo:
- raise ValueError, "Incorrectly formed template_name '%s'" % (template_name,)
- return mo.group (1)
-
-def open_src (name, mode):
- global srcdir
- return open (os.path.join (srcdir, name), mode)
-
-def do_substitution (d, in_file, out_file):
- def repl (match_obj):
- key = match_obj.group (1)
- # print key
- return d[key]
-
- inp = in_file.read ()
- out = re.sub (r"@([a-zA-Z0-9_]+)@", repl, inp)
- out_file.write (out)
-
-
-
-copyright = '''/* -*- c++ -*- */
-/*
- * Copyright 2003,2004 Free Software Foundation, Inc.
- *
- * This file is part of GNU Radio
- *
- * GNU Radio is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3, or (at your option)
- * any later version.
- *
- * GNU Radio is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU Radio; see the file COPYING. If not, write to
- * the Free Software Foundation, Inc., 51 Franklin Street,
- * Boston, MA 02110-1301, USA.
- */
-'''
-
-def is_complex (code3):
- if i_code (code3) == 'c' or o_code (code3) == 'c':
- return '1'
- else:
- return '0'
-
-
-def standard_dict (name, code3, package='gr'):
- d = {}
- d['NAME'] = name
- d['NAME_IMPL'] = name+'_impl'
- d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper())
- d['GUARD_NAME_IMPL'] = 'INCLUDED_%s_%s_IMPL_H' % (package.upper(), name.upper())
- d['BASE_NAME'] = re.sub ('^' + package + '_', '', name)
- d['SPTR_NAME'] = '%s_sptr' % name
- d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten'
- d['COPYRIGHT'] = copyright
- d['TYPE'] = i_type (code3)
- d['I_TYPE'] = i_type (code3)
- d['O_TYPE'] = o_type (code3)
- d['TAP_TYPE'] = tap_type (code3)
- d['IS_COMPLEX'] = is_complex (code3)
- return d
-
-
-def standard_dict2 (name, code3, package):
- d = {}
- d['NAME'] = name
- d['BASE_NAME'] = name
- d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper())
- d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten'
- d['COPYRIGHT'] = copyright
- d['TYPE'] = i_type (code3)
- d['I_TYPE'] = i_type (code3)
- d['O_TYPE'] = o_type (code3)
- d['TAP_TYPE'] = tap_type (code3)
- d['IS_COMPLEX'] = is_complex (code3)
- return d
-
-def standard_impl_dict2 (name, code3, package):
- d = {}
- d['NAME'] = name
- d['IMPL_NAME'] = name
- d['BASE_NAME'] = name.rstrip("impl").rstrip("_")
- d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper())
- d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten'
- d['COPYRIGHT'] = copyright
- d['FIR_TYPE'] = "fir_filter_" + code3
- d['CFIR_TYPE'] = "fir_filter_" + code3[0:2] + 'c'
- d['TYPE'] = i_type (code3)
- d['I_TYPE'] = i_type (code3)
- d['O_TYPE'] = o_type (code3)
- d['TAP_TYPE'] = tap_type (code3)
- d['IS_COMPLEX'] = is_complex (code3)
- return d
diff --git a/python/build_utils.pyc b/python/build_utils.pyc
deleted file mode 100644
index 68dff47..0000000
Binary files a/python/build_utils.pyc and /dev/null differ
diff --git a/python/build_utils_codes.py b/python/build_utils_codes.py
deleted file mode 100644
index 9ea96ba..0000000
--- a/python/build_utils_codes.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-# Copyright 2004 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-#
-
-def i_code (code3):
- return code3[0]
-
-def o_code (code3):
- if len (code3) >= 2:
- return code3[1]
- else:
- return code3[0]
-
-def tap_code (code3):
- if len (code3) >= 3:
- return code3[2]
- else:
- return code3[0]
-
-def i_type (code3):
- return char_to_type[i_code (code3)]
-
-def o_type (code3):
- return char_to_type[o_code (code3)]
-
-def tap_type (code3):
- return char_to_type[tap_code (code3)]
-
-
-char_to_type = {}
-char_to_type['s'] = 'short'
-char_to_type['i'] = 'int'
-char_to_type['f'] = 'float'
-char_to_type['c'] = 'gr_complex'
-char_to_type['b'] = 'unsigned char'
diff --git a/python/build_utils_codes.pyc b/python/build_utils_codes.pyc
deleted file mode 100644
index f9a9434..0000000
Binary files a/python/build_utils_codes.pyc and /dev/null differ
diff --git a/python/qa_verilog_axi_cc.py b/python/qa_verilog_axi_cc.py
deleted file mode 100755
index 9c27f38..0000000
--- a/python/qa_verilog_axi_cc.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2019 <+YOU OR YOUR COMPANY+>.
-#
-# This is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-#
-
-from gnuradio import gr, gr_unittest
-from gnuradio import blocks
-import verilog_swig as verilog
-
-class qa_verilog_axi_cc (gr_unittest.TestCase):
-
- def setUp (self):
- self.tb = gr.top_block ()
-
- def tearDown (self):
- self.tb = None
-
- def test_001_t (self):
- # set up fg
- self.tb.run ()
- # check data
-
-
-if __name__ == '__main__':
- gr_unittest.run(qa_verilog_axi_cc, "qa_verilog_axi_cc.xml")
diff --git a/python/CMakeLists.txt b/python/verilog/CMakeLists.txt
similarity index 53%
rename from python/CMakeLists.txt
rename to python/verilog/CMakeLists.txt
index 7401fbe..229dc19 100644
--- a/python/CMakeLists.txt
+++ b/python/verilog/CMakeLists.txt
@@ -1,21 +1,10 @@
# Copyright 2011 Free Software Foundation, Inc.
#
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
########################################################################
# Include python install macros
@@ -25,13 +14,15 @@ if(NOT PYTHONINTERP_FOUND)
return()
endif()
+add_subdirectory(bindings)
+
########################################################################
# Install python sources
########################################################################
GR_PYTHON_INSTALL(
FILES
__init__.py
- DESTINATION ${GR_PYTHON_DIR}/verilog
+ DESTINATION ${GR_PYTHON_DIR}/gnuradio/verilog
)
########################################################################
@@ -40,9 +31,16 @@ GR_PYTHON_INSTALL(
include(GrTest)
set(GR_TEST_TARGET_DEPS gnuradio-verilog)
-set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)
+
+# Create a package directory that tests can import. It includes everything
+# from `python/`.
+add_custom_target(
+ copy_module_for_tests ALL
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_BINARY_DIR}/test_modules/gnuradio/verilog/
+)
+
+GR_ADD_TEST(qa_verilog_axi_bb ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_bb.py)
+GR_ADD_TEST(qa_verilog_axi_ss ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ss.py)
GR_ADD_TEST(qa_verilog_axi_ii ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ii.py)
GR_ADD_TEST(qa_verilog_axi_ff ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ff.py)
-GR_ADD_TEST(qa_verilog_axi_ss ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ss.py)
-GR_ADD_TEST(qa_verilog_axi_cc ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_cc.py)
-GR_ADD_TEST(qa_verilog_axi_bb ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ss.py)
diff --git a/python/verilog/__init__.py b/python/verilog/__init__.py
new file mode 100644
index 0000000..89f4275
--- /dev/null
+++ b/python/verilog/__init__.py
@@ -0,0 +1,23 @@
+#
+# Copyright 2008,2009 Free Software Foundation, Inc.
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+# The presence of this file turns this directory into a Python package
+
+'''
+This is the GNU Radio VERILOG module. Place your Python package
+description here (python/__init__.py).
+'''
+import os
+
+# import pybind11 generated symbols into the verilog namespace
+try:
+ # this might fail if the module is python-only
+ from .verilog_python import *
+except ModuleNotFoundError:
+ pass
+
+# import any pure python here
+#
diff --git a/python/verilog/bindings/CMakeLists.txt b/python/verilog/bindings/CMakeLists.txt
new file mode 100644
index 0000000..64b6092
--- /dev/null
+++ b/python/verilog/bindings/CMakeLists.txt
@@ -0,0 +1,51 @@
+# Copyright 2020 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+########################################################################
+# Check if there is C++ code at all
+########################################################################
+if(NOT verilog_sources)
+ MESSAGE(STATUS "No C++ sources... skipping python bindings")
+ return()
+endif(NOT verilog_sources)
+
+########################################################################
+# Check for pygccxml
+########################################################################
+GR_PYTHON_CHECK_MODULE_RAW(
+ "pygccxml"
+ "import pygccxml"
+ PYGCCXML_FOUND
+ )
+
+include(GrPybind)
+
+########################################################################
+# Python Bindings
+########################################################################
+
+list(APPEND verilog_python_files
+ verilog_axi_bb_python.cc
+ verilog_axi_ss_python.cc
+ verilog_axi_ii_python.cc
+ verilog_axi_ff_python.cc
+ python_bindings.cc)
+
+GR_PYBIND_MAKE_OOT(verilog
+ ../../..
+ gr::verilog
+ "${verilog_python_files}")
+
+# copy in bindings .so file for use in QA test module
+add_custom_target(
+ copy_bindings_for_tests ALL
+ COMMAND
+ ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_BINARY_DIR}/*.so"
+ ${CMAKE_BINARY_DIR}/test_modules/gnuradio/verilog/
+ DEPENDS verilog_python)
+
+install(TARGETS verilog_python DESTINATION ${GR_PYTHON_DIR}/gnuradio/verilog COMPONENT pythonapi)
diff --git a/python/verilog/bindings/README.md b/python/verilog/bindings/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/python/verilog/bindings/bind_oot_file.py b/python/verilog/bindings/bind_oot_file.py
new file mode 100644
index 0000000..5bc3ff6
--- /dev/null
+++ b/python/verilog/bindings/bind_oot_file.py
@@ -0,0 +1,58 @@
+import warnings
+import argparse
+import os
+from gnuradio.bindtool import BindingGenerator
+import pathlib
+import sys
+import tempfile
+
+parser = argparse.ArgumentParser(description='Bind a GR Out of Tree Block')
+parser.add_argument('--module', type=str,
+ help='Name of gr module containing file to bind (e.g. fft digital analog)')
+
+parser.add_argument('--output_dir', default=tempfile.gettempdir(),
+ help='Output directory of generated bindings')
+parser.add_argument('--prefix', help='Prefix of Installed GNU Radio')
+parser.add_argument('--src', help='Directory of gnuradio source tree',
+ default=os.path.dirname(os.path.abspath(__file__)) + '/../../..')
+
+parser.add_argument(
+ '--filename', help="File to be parsed")
+
+parser.add_argument(
+ '--defines', help='Set additional defines for precompiler', default=(), nargs='*')
+parser.add_argument(
+ '--include', help='Additional Include Dirs, separated', default=(), nargs='*')
+
+parser.add_argument(
+ '--status', help='Location of output file for general status (used during cmake)', default=None
+)
+parser.add_argument(
+ '--flag_automatic', default='0'
+)
+parser.add_argument(
+ '--flag_pygccxml', default='0'
+)
+
+args = parser.parse_args()
+
+prefix = args.prefix
+output_dir = args.output_dir
+defines = tuple(','.join(args.defines).split(','))
+includes = ','.join(args.include)
+name = args.module
+
+namespace = ['gr', name]
+prefix_include_root = name
+
+
+with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+ bg = BindingGenerator(prefix, namespace,
+ prefix_include_root, output_dir, define_symbols=defines, addl_includes=includes,
+ catch_exceptions=False, write_json_output=False, status_output=args.status,
+ flag_automatic=True if args.flag_automatic.lower() in [
+ '1', 'true'] else False,
+ flag_pygccxml=True if args.flag_pygccxml.lower() in ['1', 'true'] else False)
+ bg.gen_file_binding(args.filename)
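
For reference, `bind_oot_file.py` is the helper the bindings CMake step drives to turn a block header into the `*_python.cc` files added below. A minimal sketch of the same call made by hand, using only the arguments the script itself assembles; the prefix and output paths are assumptions:

    from gnuradio.bindtool import BindingGenerator

    # Positional arguments mirror the script above: prefix, namespace,
    # prefix_include_root, output_dir (both paths are placeholders).
    bg = BindingGenerator('/usr/local', ['gr', 'verilog'], 'verilog', '/tmp/bindings',
                          catch_exceptions=False, write_json_output=False,
                          flag_automatic=True, flag_pygccxml=True)
    bg.gen_file_binding('verilog_axi_ff.h')   # --filename: path to the block header
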
diff --git a/python/verilog/bindings/docstrings/README.md b/python/verilog/bindings/docstrings/README.md
new file mode 100644
index 0000000..a506c22
--- /dev/null
+++ b/python/verilog/bindings/docstrings/README.md
@@ -0,0 +1 @@
+This directory stores templates for docstrings that are scraped from the include header files for each block
diff --git a/python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h
new file mode 100644
index 0000000..a0f29b8
--- /dev/null
+++ b/python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr,verilog, __VA_ARGS__ )
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+
+ static const char *__doc_gr_verilog_axi_bb = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_bb = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_bb_make = R"doc()doc";
+
+
diff --git a/python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h
new file mode 100644
index 0000000..e35d22c
--- /dev/null
+++ b/python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr,verilog, __VA_ARGS__ )
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+
+ static const char *__doc_gr_verilog_axi_ff = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_ff = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_ff_make = R"doc()doc";
+
+
diff --git a/python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h
new file mode 100644
index 0000000..3be7a93
--- /dev/null
+++ b/python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr,verilog, __VA_ARGS__ )
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+
+ static const char *__doc_gr_verilog_axi_ii = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_ii = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_ii_make = R"doc()doc";
+
+
diff --git a/python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h
new file mode 100644
index 0000000..aec5663
--- /dev/null
+++ b/python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#include "pydoc_macros.h"
+#define D(...) DOC(gr,verilog, __VA_ARGS__ )
+/*
+ This file contains placeholders for docstrings for the Python bindings.
+ Do not edit! These were automatically extracted during the binding process
+ and will be overwritten during the build process
+ */
+
+
+
+ static const char *__doc_gr_verilog_axi_ss = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_ss = R"doc()doc";
+
+
+ static const char *__doc_gr_verilog_verilog_axi_ss_make = R"doc()doc";
+
+
diff --git a/python/verilog/bindings/header_utils.py b/python/verilog/bindings/header_utils.py
new file mode 100644
index 0000000..7c26fe0
--- /dev/null
+++ b/python/verilog/bindings/header_utils.py
@@ -0,0 +1,80 @@
+# Utilities for reading values in header files
+
+from argparse import ArgumentParser
+import re
+
+
+class PybindHeaderParser:
+ def __init__(self, pathname):
+ with open(pathname, 'r') as f:
+ self.file_txt = f.read()
+
+ def get_flag_automatic(self):
+ # p = re.compile(r'BINDTOOL_GEN_AUTOMATIC\(([^\s])\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_GEN_AUTOMATIC\(([^\s])\)', self.file_txt)
+ if (m and m.group(1) == '1'):
+ return True
+ else:
+ return False
+
+ def get_flag_pygccxml(self):
+ # p = re.compile(r'BINDTOOL_USE_PYGCCXML\(([^\s])\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_USE_PYGCCXML\(([^\s])\)', self.file_txt)
+ if (m and m.group(1) == '1'):
+ return True
+ else:
+ return False
+
+ def get_header_filename(self):
+ # p = re.compile(r'BINDTOOL_HEADER_FILE\(([^\s]*)\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_HEADER_FILE\(([^\s]*)\)', self.file_txt)
+ if (m):
+ return m.group(1)
+ else:
+ return None
+
+ def get_header_file_hash(self):
+ # p = re.compile(r'BINDTOOL_HEADER_FILE_HASH\(([^\s]*)\)')
+ # m = p.search(self.file_txt)
+ m = re.search(r'BINDTOOL_HEADER_FILE_HASH\(([^\s]*)\)', self.file_txt)
+ if (m):
+ return m.group(1)
+ else:
+ return None
+
+ def get_flags(self):
+ return f'{self.get_flag_automatic()};{self.get_flag_pygccxml()};{self.get_header_filename()};{self.get_header_file_hash()};'
+
+
+def argParse():
+ """Parses commandline args."""
+ desc = 'Reads the parameters from the comment block in the pybind files'
+ parser = ArgumentParser(description=desc)
+
+ parser.add_argument("function", help="Operation to perform on comment block of pybind file", choices=[
+ "flag_auto", "flag_pygccxml", "header_filename", "header_file_hash", "all"])
+ parser.add_argument(
+ "pathname", help="Pathname of pybind c++ file to read, e.g. blockname_python.cc")
+
+ return parser.parse_args()
+
+
+if __name__ == "__main__":
+ # Parse command line options and set up doxyxml.
+ args = argParse()
+
+ pbhp = PybindHeaderParser(args.pathname)
+
+ if args.function == "flag_auto":
+ print(pbhp.get_flag_automatic())
+ elif args.function == "flag_pygccxml":
+ print(pbhp.get_flag_pygccxml())
+ elif args.function == "header_filename":
+ print(pbhp.get_header_filename())
+ elif args.function == "header_file_hash":
+ print(pbhp.get_header_file_hash())
+ elif args.function == "all":
+ print(pbhp.get_flags())
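
A short usage sketch of the parser above, run against one of the generated binding files added below; the expected output follows directly from the BINDTOOL tags recorded in that file:

    from header_utils import PybindHeaderParser

    pbhp = PybindHeaderParser("verilog_axi_ff_python.cc")
    # With BINDTOOL_GEN_AUTOMATIC(1), BINDTOOL_USE_PYGCCXML(1), the header file
    # name and its hash, get_flags() returns one semicolon-separated string:
    #   True;True;verilog_axi_ff.h;78d518a7da29e3f42307301fe783be14;
    print(pbhp.get_flags())
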
diff --git a/python/verilog/bindings/python_bindings.cc b/python/verilog/bindings/python_bindings.cc
new file mode 100644
index 0000000..174f0d5
--- /dev/null
+++ b/python/verilog/bindings/python_bindings.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2020 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+#include <pybind11/pybind11.h>
+
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include <numpy/arrayobject.h>
+
+namespace py = pybind11;
+
+// Headers for binding functions
+/**************************************/
+// The following comment block is used for
+// gr_modtool to insert function prototypes
+// Please do not delete
+/**************************************/
+// BINDING_FUNCTION_PROTOTYPES(
+ void bind_verilog_axi_bb(py::module& m);
+ void bind_verilog_axi_ss(py::module& m);
+ void bind_verilog_axi_ii(py::module& m);
+ void bind_verilog_axi_ff(py::module& m);
+// ) END BINDING_FUNCTION_PROTOTYPES
+
+
+// We need this hack because import_array() returns NULL
+// for newer Python versions.
+// This function is also necessary because it ensures access to the C API
+// and removes a warning.
+void* init_numpy()
+{
+ import_array();
+ return NULL;
+}
+
+PYBIND11_MODULE(verilog_python, m)
+{
+ // Initialize the numpy C API
+ // (otherwise we will see segmentation faults)
+ init_numpy();
+
+ // Allow access to base block methods
+ py::module::import("gnuradio.gr");
+
+ /**************************************/
+ // The following comment block is used for
+ // gr_modtool to insert binding function calls
+ // Please do not delete
+ /**************************************/
+ // BINDING_FUNCTION_CALLS(
+ bind_verilog_axi_bb(m);
+ bind_verilog_axi_ss(m);
+ bind_verilog_axi_ii(m);
+ bind_verilog_axi_ff(m);
+ // ) END BINDING_FUNCTION_CALLS
+}
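
Once `verilog_python` is built and installed (or copied into `test_modules/` by the targets above), the blocks bound here are driven from Python exactly as in the QA tests further down. A condensed flow-graph sketch, with the Verilog source path shortened for illustration:

    from gnuradio import gr, blocks
    from gnuradio import verilog   # package provided by this module

    tb = gr.top_block()
    src = blocks.vector_source_b((1, 3, 5, 9))
    # Constructor arguments are copied from the QA tests below.
    vl = verilog.verilog_axi_bb("testcases/passthru/saxi_passthru.v",
                                True, 1.0, "", 0, 0)
    dst = blocks.vector_sink_b()
    tb.connect(src, vl)
    tb.connect(vl, dst)
    tb.run()
    print(dst.data())   # pass-through module: output matches src_data
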
diff --git a/python/verilog/bindings/verilog_axi_bb_python.cc b/python/verilog/bindings/verilog_axi_bb_python.cc
new file mode 100644
index 0000000..59edfd8
--- /dev/null
+++ b/python/verilog/bindings/verilog_axi_bb_python.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(1) */
+/* BINDTOOL_USE_PYGCCXML(1) */
+/* BINDTOOL_HEADER_FILE(verilog_axi_bb.h) */
+/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/verilog/verilog_axi_bb.h>
+// pydoc.h is automatically generated in the build directory
+#include <verilog_axi_bb_pydoc.h>
+
+void bind_verilog_axi_bb(py::module& m)
+{
+
+ using verilog_axi_bb = gr::verilog::verilog_axi_bb;
+
+
+ py::class_<verilog_axi_bb, gr::block, gr::basic_block, std::shared_ptr<verilog_axi_bb>>(m, "verilog_axi_bb", D(verilog_axi_bb))
+
+ .def(py::init(&verilog_axi_bb::make),
+ D(verilog_axi_bb,make)
+ )
+
+
+
+
+ ;
+
+
+
+
+}
+
+
+
+
+
+
+
+
diff --git a/python/verilog/bindings/verilog_axi_ff_python.cc b/python/verilog/bindings/verilog_axi_ff_python.cc
new file mode 100644
index 0000000..0201ead
--- /dev/null
+++ b/python/verilog/bindings/verilog_axi_ff_python.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(1) */
+/* BINDTOOL_USE_PYGCCXML(1) */
+/* BINDTOOL_HEADER_FILE(verilog_axi_ff.h) */
+/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/verilog/verilog_axi_ff.h>
+// pydoc.h is automatically generated in the build directory
+#include <verilog_axi_ff_pydoc.h>
+
+void bind_verilog_axi_ff(py::module& m)
+{
+
+ using verilog_axi_ff = gr::verilog::verilog_axi_ff;
+
+
+ py::class_<verilog_axi_ff, gr::block, gr::basic_block, std::shared_ptr<verilog_axi_ff>>(m, "verilog_axi_ff", D(verilog_axi_ff))
+
+ .def(py::init(&verilog_axi_ff::make),
+ D(verilog_axi_ff,make)
+ )
+
+
+
+
+ ;
+
+
+
+
+}
+
+
+
+
+
+
+
+
diff --git a/python/verilog/bindings/verilog_axi_ii_python.cc b/python/verilog/bindings/verilog_axi_ii_python.cc
new file mode 100644
index 0000000..0421f52
--- /dev/null
+++ b/python/verilog/bindings/verilog_axi_ii_python.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(1) */
+/* BINDTOOL_USE_PYGCCXML(1) */
+/* BINDTOOL_HEADER_FILE(verilog_axi_ii.h) */
+/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/verilog/verilog_axi_ii.h>
+// pydoc.h is automatically generated in the build directory
+#include <verilog_axi_ii_pydoc.h>
+
+void bind_verilog_axi_ii(py::module& m)
+{
+
+ using verilog_axi_ii = gr::verilog::verilog_axi_ii;
+
+
+ py::class_<verilog_axi_ii, gr::block, gr::basic_block, std::shared_ptr<verilog_axi_ii>>(m, "verilog_axi_ii", D(verilog_axi_ii))
+
+ .def(py::init(&verilog_axi_ii::make),
+ D(verilog_axi_ii,make)
+ )
+
+
+
+
+ ;
+
+
+
+
+}
+
+
+
+
+
+
+
+
diff --git a/python/verilog/bindings/verilog_axi_ss_python.cc b/python/verilog/bindings/verilog_axi_ss_python.cc
new file mode 100644
index 0000000..69c0d3f
--- /dev/null
+++ b/python/verilog/bindings/verilog_axi_ss_python.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+/***********************************************************************************/
+/* This file is automatically generated using bindtool and can be manually edited */
+/* The following lines can be configured to regenerate this file during cmake */
+/* If manual edits are made, the following tags should be modified accordingly. */
+/* BINDTOOL_GEN_AUTOMATIC(1) */
+/* BINDTOOL_USE_PYGCCXML(1) */
+/* BINDTOOL_HEADER_FILE(verilog_axi_ss.h) */
+/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */
+/***********************************************************************************/
+
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+#include <gnuradio/verilog/verilog_axi_ss.h>
+// pydoc.h is automatically generated in the build directory
+#include <verilog_axi_ss_pydoc.h>
+
+void bind_verilog_axi_ss(py::module& m)
+{
+
+ using verilog_axi_ss = gr::verilog::verilog_axi_ss;
+
+
+ py::class_<verilog_axi_ss, gr::block, gr::basic_block, std::shared_ptr<verilog_axi_ss>>(m, "verilog_axi_ss", D(verilog_axi_ss))
+
+ .def(py::init(&verilog_axi_ss::make),
+ D(verilog_axi_ss,make)
+ )
+
+
+
+
+ ;
+
+
+
+
+}
+
+
+
+
+
+
+
+
diff --git a/python/qa_verilog_axi_bb.py b/python/verilog/qa_verilog_axi_bb.py
similarity index 81%
rename from python/qa_verilog_axi_bb.py
rename to python/verilog/qa_verilog_axi_bb.py
index 07460fd..9ed152f 100755
--- a/python/qa_verilog_axi_bb.py
+++ b/python/verilog/qa_verilog_axi_bb.py
@@ -1,30 +1,37 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-#
+#
# Copyright 2019 <+YOU OR YOUR COMPANY+>.
-#
+#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
-#
+#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
-#
+#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
-import verilog_swig as verilog
import os
-class qa_verilog_axi_ss (gr_unittest.TestCase):
+try:
+ from gnuradio import verilog
+except ImportError:
+ import sys
+ dirname, filename = os.path.split(os.path.abspath(__file__))
+ sys.path.append(os.path.join(dirname, "bindings"))
+ from gnuradio import verilog
+
+class qa_verilog_axi_bb (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
@@ -36,10 +43,10 @@ def test_001_t (self):
# set up fg
src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29)
expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29)
- src = blocks.vector_source_s(src_data)
+ src = blocks.vector_source_b(src_data)
path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.'
vl = verilog.verilog_axi_bb(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0)
- dst = blocks.vector_sink_s()
+ dst = blocks.vector_sink_b()
self.tb.connect(src, vl)
self.tb.connect(vl, dst)
@@ -54,10 +61,10 @@ def test_002_t (self):
# set up fg
src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21)
expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42)
- src = blocks.vector_source_s(src_data)
+ src = blocks.vector_source_b(src_data)
path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.'
vl = verilog.verilog_axi_bb(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0)
- dst = blocks.vector_sink_s()
+ dst = blocks.vector_sink_b()
self.tb.connect(src, vl)
self.tb.connect(vl, dst)
@@ -70,4 +77,4 @@ def test_002_t (self):
if __name__ == '__main__':
- gr_unittest.run(qa_verilog_axi_ss, "qa_verilog_axi_ss.xml")
+ gr_unittest.run(qa_verilog_axi_bb, "qa_verilog_axi_bb.xml")
diff --git a/python/qa_verilog_axi_ff.py b/python/verilog/qa_verilog_axi_ff.py
similarity index 92%
rename from python/qa_verilog_axi_ff.py
rename to python/verilog/qa_verilog_axi_ff.py
index fce0904..49fb08d 100755
--- a/python/qa_verilog_axi_ff.py
+++ b/python/verilog/qa_verilog_axi_ff.py
@@ -1,29 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-#
+#
# Copyright 2019 <+YOU OR YOUR COMPANY+>.
-#
+#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
-#
+#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
-#
+#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
-import verilog_swig as verilog
import os
+try:
+ from gnuradio import verilog
+except ImportError:
+ import sys
+ dirname, filename = os.path.split(os.path.abspath(__file__))
+ sys.path.append(os.path.join(dirname, "bindings"))
+ from gnuradio import verilog
+
class qa_verilog_axi_ff (gr_unittest.TestCase):
def setUp (self):
@@ -45,7 +52,7 @@ def test_001_t (self):
self.tb.connect(vl, dst)
self.tb.run()
# check data
-
+
result_data = dst.data()
print (expected_result)
diff --git a/python/qa_verilog_axi_ii.py b/python/verilog/qa_verilog_axi_ii.py
similarity index 91%
rename from python/qa_verilog_axi_ii.py
rename to python/verilog/qa_verilog_axi_ii.py
index b18c928..0081ae3 100755
--- a/python/qa_verilog_axi_ii.py
+++ b/python/verilog/qa_verilog_axi_ii.py
@@ -21,9 +21,16 @@
from gnuradio import gr, gr_unittest
from gnuradio import blocks
-import verilog_swig as verilog
import os
+try:
+ from gnuradio import verilog
+except ImportError:
+ import sys
+ dirname, filename = os.path.split(os.path.abspath(__file__))
+ sys.path.append(os.path.join(dirname, "bindings"))
+ from gnuradio import verilog
+
class qa_verilog_axi_ii (gr_unittest.TestCase):
def setUp (self):
diff --git a/python/qa_verilog_axi_ss.py b/python/verilog/qa_verilog_axi_ss.py
similarity index 91%
rename from python/qa_verilog_axi_ss.py
rename to python/verilog/qa_verilog_axi_ss.py
index 8ef667c..f2ed821 100755
--- a/python/qa_verilog_axi_ss.py
+++ b/python/verilog/qa_verilog_axi_ss.py
@@ -1,29 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-#
+#
# Copyright 2019 <+YOU OR YOUR COMPANY+>.
-#
+#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
-#
+#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
-#
+#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
-import verilog_swig as verilog
import os
+try:
+ from gnuradio import verilog
+except ImportError:
+ import sys
+ dirname, filename = os.path.split(os.path.abspath(__file__))
+ sys.path.append(os.path.join(dirname, "bindings"))
+ from gnuradio import verilog
+
class qa_verilog_axi_ss (gr_unittest.TestCase):
def setUp (self):
diff --git a/python/testcases/double/double.v b/python/verilog/testcases/double/double.v
similarity index 100%
rename from python/testcases/double/double.v
rename to python/verilog/testcases/double/double.v
diff --git a/python/testcases/double/double_axi.v b/python/verilog/testcases/double/double_axi.v
similarity index 100%
rename from python/testcases/double/double_axi.v
rename to python/verilog/testcases/double/double_axi.v
diff --git a/python/testcases/passthru/saxi_passthru.v b/python/verilog/testcases/passthru/saxi_passthru.v
similarity index 100%
rename from python/testcases/passthru/saxi_passthru.v
rename to python/verilog/testcases/passthru/saxi_passthru.v
diff --git a/swig/CMakeLists.txt b/swig/CMakeLists.txt
deleted file mode 100644
index c4c2909..0000000
--- a/swig/CMakeLists.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.
-
-########################################################################
-# Check if there is C++ code at all
-########################################################################
-if(NOT verilog_sources)
- MESSAGE(STATUS "No C++ sources... skipping swig/")
- return()
-endif(NOT verilog_sources)
-
-########################################################################
-# Include swig generation macros
-########################################################################
-find_package(SWIG)
-find_package(PythonLibs)
-if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND)
- return()
-endif()
-include(GrSwig)
-include(GrPython)
-
-########################################################################
-# Setup swig generation
-########################################################################
-set(GR_SWIG_INCLUDE_DIRS $<TARGET_PROPERTY:gnuradio::runtime_swig,INTERFACE_INCLUDE_DIRECTORIES>)
-set(GR_SWIG_TARGET_DEPS gnuradio::runtime_swig)
-
-set(GR_SWIG_LIBRARIES gnuradio-verilog)
-
-set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/verilog_swig_doc.i)
-set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
-
-GR_SWIG_MAKE(verilog_swig verilog_swig.i)
-
-########################################################################
-# Install the build swig module
-########################################################################
-GR_SWIG_INSTALL(TARGETS verilog_swig DESTINATION ${GR_PYTHON_DIR}/verilog)
-
-########################################################################
-# Install swig .i files for development
-########################################################################
-install(
- FILES
- verilog_swig.i
- ${CMAKE_CURRENT_BINARY_DIR}/verilog_swig_doc.i
- DESTINATION ${GR_INCLUDE_DIR}/verilog/swig
-)
diff --git a/swig/verilog_swig.i b/swig/verilog_swig.i
deleted file mode 100644
index d086329..0000000
--- a/swig/verilog_swig.i
+++ /dev/null
@@ -1,28 +0,0 @@
-/* -*- c++ -*- */
-
-#define VERILOG_API
-
-%include "gnuradio.i" // the common stuff
-
-//load generated python docstrings
-%include "verilog_swig_doc.i"
-
-%{
-#include "verilog/verilog_axi_ii.h"
-#include "verilog/verilog_axi_ff.h"
-#include "verilog/verilog_axi_ss.h"
-#include "verilog/verilog_axi_bb.h"
-#include "verilog/verilog_axi_cc.h"
-%}
-
-
-%include "verilog/verilog_axi_ii.h"
-GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_ii);
-%include "verilog/verilog_axi_ff.h"
-GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_ff);
-%include "verilog/verilog_axi_ss.h"
-GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_ss);
-%include "verilog/verilog_axi_bb.h"
-GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_bb);
-%include "verilog/verilog_axi_cc.h"
-GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_cc);