BIND 10 trac826, updated. 6cc92fea3f93aa21595757e203c627572e36d294 [trac826] update
BIND 10 source code commits
bind10-changes at lists.isc.org
Sat Oct 15 20:22:16 UTC 2011
The branch, trac826 has been updated
via 6cc92fea3f93aa21595757e203c627572e36d294 (commit)
from 7cf811c42d6a967ee2cce79012f6b649d0107941 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 6cc92fea3f93aa21595757e203c627572e36d294
Author: Francis Dupont <fdupont at isc.org>
Date: Sat Oct 15 22:21:52 2011 +0200
[trac826] update
update according to the last snapshot; src/lib/datasrc is not finished...
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 72 +-
Makefile.am | 4 +
README | 218 +---
WIN32-NOTES | 1 +
configure.ac | 14 +-
doc/Doxyfile | 2 +-
doc/guide/Makefile.am | 7 +-
doc/guide/bind10-guide.html | 171 ++-
doc/guide/bind10-guide.txt | 1201 ++++++++++++++++++
doc/guide/bind10-guide.xml | 125 ++-
doc/guide/bind10-messages.xml | 358 +++++-
ext/asio/asio/impl/error_code.ipp | 3 +
src/bin/auth/Makefile.am | 6 +
src/bin/auth/auth_messages.mes | 3 +
src/bin/auth/auth_srv.cc | 24 +
src/bin/auth/benchmarks/Makefile.am | 6 +
src/bin/auth/statistics.cc | 32 +-
src/bin/auth/statistics.h | 20 +
src/bin/auth/tests/Makefile.am | 7 +
src/bin/auth/tests/statistics_unittest.cc | 74 +-
src/bin/bind10/Makefile.am | 1 +
src/bin/bind10/bind10_messages.mes | 4 +
src/bin/bind10/bind10_src.py.in | 51 +-
src/bin/bind10/tests/bind10_test.py.in | 28 +-
src/bin/cfgmgr/plugins/Makefile.am | 11 +-
src/bin/dhcp6/Makefile.am | 13 +-
src/bin/dhcp6/dhcp6.h | 184 +++
src/bin/dhcp6/dhcp6_srv.cc | 199 +---
src/bin/dhcp6/dhcp6_srv.h | 38 -
src/bin/dhcp6/iface_mgr.cc | 99 +-
src/bin/dhcp6/iface_mgr.h | 9 +-
src/bin/dhcp6/pkt6.cc | 47 +
src/bin/dhcp6/pkt6.h | 62 +
src/bin/dhcp6/tests/Makefile.am | 13 +-
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 56 +-
src/bin/dhcp6/tests/dhcp6_test.py | 2 +-
src/bin/dhcp6/tests/iface_mgr_unittest.cc | 227 +---
src/bin/dhcp6/tests/pkt6_unittest.cc | 48 +
src/bin/resolver/tests/Makefile.am | 2 -
src/bin/stats/Makefile.am | 4 +-
src/bin/stats/b10-stats-httpd.8 | 6 +-
src/bin/stats/b10-stats-httpd.xml | 10 +-
src/bin/stats/b10-stats.8 | 4 -
src/bin/stats/b10-stats.xml | 6 -
src/bin/stats/stats-httpd-xsl.tpl | 1 +
src/bin/stats/stats-schema.spec | 86 --
src/bin/stats/stats.py.in | 589 +++++-----
src/bin/stats/stats.spec | 71 +-
src/bin/stats/stats_httpd.py.in | 280 +++--
src/bin/stats/stats_messages.mes | 21 +-
src/bin/stats/tests/Makefile.am | 10 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 782 +++++++-----
src/bin/stats/tests/b10-stats_test.py | 1197 +++++++++---------
src/bin/stats/tests/fake_select.py | 43 -
src/bin/stats/tests/fake_socket.py | 70 -
src/bin/stats/tests/fake_time.py | 47 -
src/bin/stats/tests/http/Makefile.am | 6 -
src/bin/stats/tests/http/server.py | 96 --
src/bin/stats/tests/isc/Makefile.am | 8 -
src/bin/stats/tests/isc/cc/Makefile.am | 7 -
src/bin/stats/tests/isc/cc/__init__.py | 1 -
src/bin/stats/tests/isc/cc/session.py | 156 ---
src/bin/stats/tests/isc/config/Makefile.am | 7 -
src/bin/stats/tests/isc/config/__init__.py | 1 -
src/bin/stats/tests/isc/config/ccsession.py | 249 ----
src/bin/stats/tests/isc/log/Makefile.am | 7 -
src/bin/stats/tests/isc/log/__init__.py | 33 -
src/bin/stats/tests/isc/util/Makefile.am | 7 -
src/bin/stats/tests/isc/util/process.py | 21 -
src/bin/stats/tests/test_utils.py | 364 ++++++
src/bin/stats/tests/testdata/Makefile.am | 1 -
src/bin/stats/tests/testdata/stats_test.spec | 19 -
src/bin/tests/Makefile.am | 2 +-
src/bin/xfrin/b10-xfrin.8 | 30 +-
src/bin/xfrin/b10-xfrin.xml | 22 +-
src/bin/xfrin/tests/Makefile.am | 7 +
src/bin/xfrin/tests/testdata/Makefile.am | 2 +
src/bin/xfrin/tests/testdata/example.com | 17 +
src/bin/xfrin/tests/testdata/example.com.sqlite3 | Bin 0 -> 11264 bytes
src/bin/xfrin/tests/xfrin_test.py | 1332 ++++++++++++++++++--
src/bin/xfrin/xfrin.py.in | 603 ++++++++--
src/bin/xfrin/xfrin.spec | 2 +-
src/bin/xfrin/xfrin_messages.mes | 56 +-
src/cppcheck-suppress.lst | 1 +
src/lib/Makefile.am | 6 +-
src/lib/bench/Makefile.am | 2 +-
src/lib/bench/tests/run_unittests.cc | 3 +-
src/lib/config/tests/Makefile.am | 2 +-
src/lib/datasrc/Makefile.am | 16 +-
src/lib/datasrc/client.h | 44 +
src/lib/datasrc/data_source.h | 12 +-
src/lib/datasrc/factory.cc | 127 ++
src/lib/datasrc/factory.h | 174 +++
src/lib/datasrc/memory_datasrc.cc | 161 +++-
src/lib/datasrc/memory_datasrc.h | 43 +-
src/lib/datasrc/sqlite3_accessor.cc | 106 ++-
src/lib/datasrc/sqlite3_accessor.h | 47 +-
src/lib/datasrc/tests/Makefile.am | 17 +-
src/lib/datasrc/tests/database_unittest.cc | 3 +-
src/lib/datasrc/tests/factory_unittest.cc | 175 +++
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 40 +-
src/lib/dns/Makefile.am | 4 +
src/lib/dns/python/message_python.cc | 6 +-
src/lib/dns/rdata/generic/detail/ds_like.h | 225 ++++
src/lib/dns/rdata/generic/detail/txt_like.h | 56 +-
src/lib/dns/rdata/generic/dlv_32769.cc | 121 ++
src/lib/dns/rdata/generic/dlv_32769.h | 77 ++
src/lib/dns/rdata/generic/ds_43.cc | 112 +--
src/lib/dns/rdata/generic/ds_43.h | 33 +-
src/lib/dns/rdata/generic/spf_99.cc | 44 +
src/lib/dns/rdata/generic/spf_99.h | 26 +
src/lib/dns/rdata/in_1/dhcid_49.cc | 8 +-
src/lib/dns/rdata/in_1/dhcid_49.h | 2 +-
src/lib/dns/tests/Makefile.am | 6 +-
src/lib/dns/tests/rdata_dhcid_unittest.cc | 111 ++
src/lib/dns/tests/rdata_ds_like_unittest.cc | 171 +++
src/lib/dns/tests/rdata_ds_unittest.cc | 99 --
src/lib/dns/tests/rdata_txt_like_unittest.cc | 261 ++++
src/lib/dns/tests/rdata_txt_unittest.cc | 166 ---
src/lib/dns/tests/testdata/Makefile.am | 1 +
src/lib/dns/tests/testdata/rdata_dhcid_fromWire | 12 +
src/lib/dns/tests/testdata/rdata_dhcid_toWire | 7 +
src/lib/log/tests/Makefile.am | 6 +-
src/lib/python/isc/Makefile.am | 2 +-
src/lib/python/isc/config/ccsession.py | 4 +-
src/lib/python/isc/config/tests/ccsession_test.py | 3 +
src/lib/python/isc/datasrc/Makefile.am | 1 -
src/lib/python/isc/datasrc/__init__.py | 16 +
src/lib/python/isc/datasrc/client_inc.cc | 17 +-
src/lib/python/isc/datasrc/client_python.cc | 57 +-
src/lib/python/isc/datasrc/datasrc.cc | 83 +-
src/lib/python/isc/datasrc/finder_inc.cc | 22 +
src/lib/python/isc/datasrc/finder_python.cc | 54 +-
src/lib/python/isc/datasrc/finder_python.h | 10 +-
src/lib/python/isc/datasrc/iterator_python.cc | 19 +-
src/lib/python/isc/datasrc/iterator_python.h | 10 +-
src/lib/python/isc/datasrc/tests/Makefile.am | 7 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 100 ++-
src/lib/python/isc/datasrc/updater_python.cc | 63 +-
src/lib/python/isc/datasrc/updater_python.h | 10 +-
src/lib/python/isc/dns/Makefile.am | 1 +
src/lib/python/isc/log/log.cc | 2 +-
src/lib/python/isc/log_messages/Makefile.am | 2 +
.../python/isc/log_messages/libxfrin_messages.py | 1 +
src/lib/python/isc/xfrin/Makefile.am | 23 +
.../http => lib/python/isc/xfrin}/__init__.py | 0
src/lib/python/isc/xfrin/diff.py | 237 ++++
src/lib/python/isc/xfrin/libxfrin_messages.mes | 21 +
src/lib/python/isc/xfrin/tests/Makefile.am | 24 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 446 +++++++
src/lib/resolve/recursive_query.cc | 2 +
src/lib/resolve/tests/recursive_query_unittest.cc | 24 +-
src/lib/testutils/Makefile.am | 2 +-
src/lib/util/pyunittests/Makefile.am | 7 +-
src/lib/util/unittests/Makefile.am | 2 +-
tests/system/bindctl/tests.sh | 16 +-
tests/system/cleanall.sh | 5 +-
win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj | 5 +-
.../VS2010/b10-dhcp6/b10-dhcp6.vcxproj.filters | 9 +
.../VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj | 9 +-
.../b10-dhcp6_tests.vcxproj.filters | 9 +
win32build/VS2010/bind10.sln | 9 +-
.../VS2010/libbench_tests/libbench_tests.vcxproj | 10 +-
.../libcfgclient_tests/libcfgclient_tests.vcxproj | 12 +-
.../libcfgclient_tests.vcxproj.filters | 6 -
win32build/VS2010/libdatasrc/libdatasrc.vcxproj | 4 +-
.../VS2010/libdatasrc/libdatasrc.vcxproj.filters | 22 +-
.../libdatasrc_tests/libdatasrc_tests.vcxproj | 3 +-
.../libdatasrc_tests.vcxproj.filters | 3 +
win32build/VS2010/libdns++/libdns++.vcxproj | 2 +
.../VS2010/libdns++/libdns++.vcxproj.filters | 6 +
.../VS2010/libdns++_tests/libdns++_tests.vcxproj | 7 +-
.../libdns++_tests/libdns++_tests.vcxproj.filters | 15 +-
win32build/VS2010/pyddatasrc/pyddatasrc.vcxproj | 10 +-
174 files changed, 9581 insertions(+), 4111 deletions(-)
create mode 100644 doc/guide/bind10-guide.txt
create mode 100644 src/bin/dhcp6/dhcp6.h
create mode 100644 src/bin/dhcp6/pkt6.cc
create mode 100644 src/bin/dhcp6/pkt6.h
create mode 100644 src/bin/dhcp6/tests/pkt6_unittest.cc
delete mode 100644 src/bin/stats/stats-schema.spec
mode change 100755 => 100644 src/bin/stats/stats_httpd.py.in
delete mode 100644 src/bin/stats/tests/fake_select.py
delete mode 100644 src/bin/stats/tests/fake_socket.py
delete mode 100644 src/bin/stats/tests/fake_time.py
delete mode 100644 src/bin/stats/tests/http/Makefile.am
delete mode 100644 src/bin/stats/tests/http/server.py
delete mode 100644 src/bin/stats/tests/isc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/session.py
delete mode 100644 src/bin/stats/tests/isc/config/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/config/__init__.py
delete mode 100644 src/bin/stats/tests/isc/config/ccsession.py
delete mode 100644 src/bin/stats/tests/isc/log/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/log/__init__.py
delete mode 100644 src/bin/stats/tests/isc/util/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/util/__init__.py
delete mode 100644 src/bin/stats/tests/isc/util/process.py
create mode 100644 src/bin/stats/tests/test_utils.py
delete mode 100644 src/bin/stats/tests/testdata/Makefile.am
delete mode 100644 src/bin/stats/tests/testdata/stats_test.spec
create mode 100644 src/bin/xfrin/tests/testdata/Makefile.am
create mode 100644 src/bin/xfrin/tests/testdata/example.com
create mode 100644 src/bin/xfrin/tests/testdata/example.com.sqlite3
create mode 100644 src/lib/datasrc/factory.cc
create mode 100644 src/lib/datasrc/factory.h
create mode 100644 src/lib/datasrc/tests/factory_unittest.cc
create mode 100644 src/lib/dns/rdata/generic/detail/ds_like.h
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.cc
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.h
create mode 100644 src/lib/dns/tests/rdata_dhcid_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_ds_like_unittest.cc
delete mode 100644 src/lib/dns/tests/rdata_ds_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_txt_like_unittest.cc
delete mode 100644 src/lib/dns/tests/rdata_txt_unittest.cc
create mode 100644 src/lib/dns/tests/testdata/rdata_dhcid_fromWire
create mode 100644 src/lib/dns/tests/testdata/rdata_dhcid_toWire
create mode 100644 src/lib/python/isc/log_messages/libxfrin_messages.py
create mode 100644 src/lib/python/isc/xfrin/Makefile.am
rename src/{bin/stats/tests/http => lib/python/isc/xfrin}/__init__.py (100%)
create mode 100644 src/lib/python/isc/xfrin/diff.py
create mode 100644 src/lib/python/isc/xfrin/libxfrin_messages.mes
create mode 100644 src/lib/python/isc/xfrin/tests/Makefile.am
create mode 100644 src/lib/python/isc/xfrin/tests/diff_tests.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index b0b2aea..f1f8a9c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,65 @@
+299. [build] jreed
+ Do not install the libfake_session, libtestutils, or libbench
+ libraries. They are used by tests within the source tree.
+ Convert all test-related makefiles to build test code at
+ regular make time to better work with test-driven development.
+ This reverts some of #1901. (The tests are run using "make
+ check".)
+ (Trac #1286, git cee641fd3d12341d6bfce5a6fbd913e3aebc1e8e)
+
+bind10-devel-20111014 released on October 14, 2011
+
+298. [doc] jreed
+ Shorten README. Include plain text format of the Guide.
+ (git d1897d3, git 337198f)
+
+297. [func] dvv
+ Implement the SPF rrtype according to RFC4408.
+ (Trac #1140, git 146934075349f94ee27f23bf9ff01711b94e369e)
+
+296. [build] jreed
+ Do not install the unittest libraries. At this time, they
+ are not useful without source tree (and they may or may
+ not have googletest support). Also, convert several makefiles
+ to build tests at "check" time and not build time.
+ (Trac #1091, git 2adf4a90ad79754d52126e7988769580d20501c3)
+
+295. [bug] jinmei
+ __init__.py for isc.dns was installed in the wrong directory,
+ which would now make xfrin fail to start. It was also bad
+ in that it replaced any existing __init__.py in the public
+ site-packages directory. After applying this fix, you may want to
+ check if the wrong init file is in the wrong place, in which
+ case it should be removed.
+ (Trac #1285, git af3b17472694f58b3d6a56d0baf64601b0f6a6a1)
+
+294. [func] jelte, jinmei, vorner
+ b10-xfrin now supports incoming IXFR. See BIND 10 Guide for
+ how to configure it and operational notes.
+ (Trac #1212, multiple git merges)
+
+293. [func]* tomek
+ b10-dhcp6: Implemented DHCPv6 echo server. It joins DHCPv6
+ multicast groups and listens to incoming DHCPv6 client messages.
+ Received messages are then echoed back to clients. This
+ functionality is limited, but it can be used to test out client
+ resiliency to unexpected messages. Note that network interface
+ detection routines are not implemented yet, so interface name
+ and its address must be specified in interfaces.txt.
+ (Trac #878, git 3b1a604abf5709bfda7271fa94213f7d823de69d)
+
+292. [func] dvv
+ Implement the DLV rrtype according to RFC4431.
+ (Trac #1144, git d267c0511a07c41cd92e3b0b9ee9bf693743a7cf)
+
+291. [func] naokikambe
+ Statistics items are specified by each module's spec file.
+ Stats module can read these through the config manager. Stats
+ module and stats httpd report statistics data and statistics
+ schema by each module via both bindctl and HTTP/XML.
+ (Trac #928,#929,#930,#1175,
+ git 054699635affd9c9ecbe7a108d880829f3ba229e)
+
290. [func] jinmei
libdns++/pydnspp: added an option parameter to the "from wire"
methods of the Message class. One option is defined,
@@ -8,7 +70,7 @@
289. [func]* jinmei
b10-xfrout: ACLs for xfrout can now be configured per zone basis.
- A per zone ACl is part of a more general zone configuration. A
+ A per zone ACL is part of a more general zone configuration. A
quick example for configuring an ACL for zone "example.com" that
rejects any transfer request for that zone is as follows:
> config add Xfrout/zone_config
@@ -24,7 +86,7 @@
configuration.
(Trac #1165, git 698176eccd5d55759fe9448b2c249717c932ac31)
-288. [bug] stephen
+288. [bug] stephen
Fixed problem whereby the order in which component files appeared in
rdataclass.cc was system dependent, leading to problems on some
systems where data types were used before the header file in which
@@ -39,7 +101,7 @@
python files from the common directly (such as "site-packages").
(Trac #1101, git 0eb576518f81c3758c7dbaa2522bd8302b1836b3)
-286. [func] ocean
+286. [func] ocean
libdns++: Implement the HINFO rrtype support according to RFC1034,
and RFC1035.
(Trac #1112, git 12d62d54d33fbb1572a1aa3089b0d547d02924aa)
@@ -55,14 +117,14 @@
log a warning and try to do zone transfer for them.
(Trac #1153, git 0a39659638fc68f60b95b102968d7d0ad75443ea)
-283. [bug] zhanglikun
+283. [bug] zhanglikun
Make stats and boss processes wait for answer messages from each
other in block mode to avoid orphan answer messages, add an internal
command "getstats" to boss process for getting statistics data from
boss.
(Trac #519, git 67d8e93028e014f644868fede3570abb28e5fb43)
-282. [func] ocean
+282. [func] ocean
libdns++: Implement the NAPTR rrtype according to RFC2915,
RFC2168 and RFC3403.
(Trac #1130, git 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c)
diff --git a/Makefile.am b/Makefile.am
index b07ef0f..50aa6b9 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -2,12 +2,16 @@ SUBDIRS = doc src tests
USE_LCOV=@USE_LCOV@
LCOV=@LCOV@
GENHTML=@GENHTML@
+DISTCHECK_GTEST_CONFIGURE_FLAG=@DISTCHECK_GTEST_CONFIGURE_FLAG@
DISTCLEANFILES = config.report
# When running distcheck target, do not install the configurations
DISTCHECK_CONFIGURE_FLAGS = --disable-install-configurations
+# Use same --with-gtest flag if set
+DISTCHECK_CONFIGURE_FLAGS += $(DISTCHECK_GTEST_CONFIGURE_FLAG)
+
clean-cpp-coverage:
@if [ $(USE_LCOV) = yes ] ; then \
$(LCOV) --directory . --zerocounters; \
diff --git a/README b/README
index 4b84a88..99e2ece 100644
--- a/README
+++ b/README
@@ -1,3 +1,4 @@
+
This is the source for the development version of BIND 10.
BIND is the popular implementation of a DNS server, developer
@@ -11,7 +12,7 @@ interfaces. Nevertheless it is ready to use now for testing the
new BIND 10 infrastructure ideas. The Year 3 goals of the five
year plan are described here:
- http://bind10.isc.org/wiki/Year3Goals
+ http://bind10.isc.org/wiki/Year3Goals
This release includes the bind10 master process, b10-msgq message
bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
@@ -21,12 +22,15 @@ AXFR inbound service, b10-xfrout outgoing AXFR service, b10-zonemgr
secondary manager, b10-stats statistics collection and reporting
daemon, b10-stats-httpd for HTTP access to XML-formatted stats,
b10-host DNS lookup utility, and a new libdns++ library for C++
-with a python wrapper.
+with a python wrapper. BIND 10 also provides an experimental DHCPv6
+echo server, b10-dhcp6.
-Documentation is included and also available via the BIND 10
-website at http://bind10.isc.org/
+Documentation is included with the source. See doc/guide/bind10-guide.txt
+(or bind10-guide.html) for installation instructions. The
+documentation is also available via the BIND 10 website at
+http://bind10.isc.org/
-The latest released source may be downloaded from:
+The latest released source tar file may be downloaded from:
ftp://ftp.isc.org/isc/bind10/
@@ -40,15 +44,11 @@ Bugs may be reported as tickets via the developers website:
http://bind10.isc.org/
-BUILDING
-
-See the Guide for detailed installation directions at
-doc/guide/bind10-guide.html.
-
-Simple build instructions:
+Simple build and installation instructions:
./configure
make
+ make install
If building from Git repository, run:
@@ -56,197 +56,11 @@ If building from Git repository, run:
before running ./configure
-Requires autoconf 2.59 or newer.
-
-Use automake-1.11 or better for working Python 3.1 tests.
-Alternatively, you could manually specify an absolute path to python
-executable by the --with-pythonpath option of the configure script,
-e.g.,
-% ./configure --with-pythonpath=/usr/local/bin/python3.1
-
-Operating-System specific tips:
-
-- FreeBSD
- You may need to install a python binding for sqlite3 by hand.
- A sample procedure is as follows:
- - add the following to /etc/make.conf
- PYTHON_VERSION=3.1
- - build and install the python binding from ports, assuming the top
- directory of the ports system is /usr/ports
- % cd /usr/ports/databases/py-sqlite3/
- % make
- % sudo make install
-
-INSTALLATION
+See the Guide for detailed installation directions at
+doc/guide/bind10-guide.txt.
-Install with:
+For operating system specific tips see the wiki at:
- make install
+ http://bind10.isc.org/wiki/SystemSpecificNotes
-TESTS
-
-The tests use the googletests framework for C++. It is available
-from http://code.google.com/p/googletest/. To enable the tests,
-configure BIND 10 with:
-
- ./configure --with-gtest
-
-Then run "make check" to run these tests.
-
-TEST COVERAGE
-
-Code coverage reports may be generated using make. These are
-based on running on the unit tests. The resulting reports are placed
-in coverage-cpp-html and coverage-python-html directories for C++
-and Python, respectively.
-
-The code coverage report for the C++ tests uses LCOV. It is available
-from http://ltp.sourceforge.net/. To generate the HTML report,
-first configure BIND 10 with:
-
- ./configure --with-lcov
-
-The code coverage report for the Python tests uses coverage.py (aka
-pycoverage). It is available from http://nedbatchelder.com/code/coverage/.
-To generate the HTML report, first configure BIND 10 with:
-
- ./configure --with-pycoverage
-
-Doing code coverage tests:
-
- make coverage
- Does the clean, perform, and report targets for C++ and Python.
-
- make clean-coverage
- Zeroes the code coverage counters and removes the HTML reports
- for C++ and Python.
-
- make perform-coverage
- Runs the C++ (using the googletests framework) and Python
- tests.
-
- make report-coverage
- Generates the coverage reports in HTML for C++ and Python.
-
- make clean-cpp-coverage
- Zeroes the code coverage counters and removes the HTML report
- for the C++ tests.
-
- make clean-python-coverage
- Zeroes the code coverage counters and removes the HTML report
- for the Python tests.
-
- make report-cpp-coverage
- Generates the coverage report in HTML for C++, excluding
- some unrelated headers. The HTML reports are placed in a
- directory called coverage-cpp-html/.
-
- make report-python-coverage
- Generates the coverage report in HTML for Python. The HTML
- reports are placed in a directory called coverage-python-html/.
-
-DEVELOPERS
-
-The generated run_*.sh scripts available in the src/bin directories
-are for running the code using the source tree.
-
-RUNNING
-
-You can start the BIND 10 processes by running bind10 which is
-installed to the sbin directory under the installation prefix.
-The default location is:
-
- /usr/local/sbin/bind10
-
-For development work, you can also run the bind10 services from the
-source tree:
-
- ./src/bin/bind10/run_bind10.sh
-
-(Which will use the modules and configurations also from the source
-tree.)
-
-CONFIGURATION
-
-Commands can be given through the bindctl tool.
-
-The server must be running for bindctl to work.
-
-The following configuration commands are available
-
-help: show the different command modules
-<module> help: show the commands for module
-<module> <command> help: show info for the command
-
-
-config show [identifier]: Show the currently set values. If no identifier is
- given, the current location is used. If a config
- option is a list or a map, the value is not
- shown directly, but must be requested separately.
-config go [identifier]: Go to the given location within the configuration.
-config set [identifier] <value>: Set a configuration value.
-config unset [identifier]: Remove a value (reverts to default if the option
- is mandatory).
-config add [identifier] <value>: add a value to a list
-config remove [identifier] <value>: remove a value from a list
-config revert: Revert all changes that have not been committed
-config commit: Commit all changes
-config diff: Show the changes that have not been committed yet
-
-
-EXAMPLE SESSION
-
-~> bindctl
-["login success "] login as root
-> help
-BindCtl, verstion 0.1
-usage: <module name> <command name> [param1 = value1 [, param2 = value2]]
-Type Tab character to get the hint of module/command/paramters.
-Type "help(? h)" for help on bindctl.
-Type "<module_name> help" for help on the specific module.
-Type "<module_name> <command_name> help" for help on the specific command.
-
-Available module names:
- help Get help for bindctl
- config Configuration commands
- Xfrin same here
- Auth same here
- Boss same here
-> config help
-Module config Configuration commands
-Available commands:
- help (Get help for module)
- show (Show configuration)
- add (Add entry to configuration list)
- remove (Remove entry from configuration list)
- set (Set a configuration value)
- unset (Unset a configuration value)
- diff (Show all local changes)
- revert (Revert all local changes)
- commit (Commit all local changes)
- go (Go to a specific configuration part)
-> config show
-Xfrin/ module
-Auth/ module
-Boss/ module
-> config show Xfrin
-transfers_in: 10 integer
-> config go Auth
-/Auth> config show
-database_file: None string
-/Auth> config set database_file /tmp/bind10_zones.db
-/Auth> config commit
-/Auth> config go /
-> config show Auth/
-database_file: /tmp/bind10_zones.db string
-> config diff
-{}
-> config set Auth/foobar
-Error: missing identifier or value
-> config set Auth/database_file foobar
-> config diff
-{'Auth': {'database_file': 'foobar'}}
-> config revert
-> config diff
-{}
-> quit
+Please see the wiki and the doc/ directory for various documentation.
diff --git a/WIN32-NOTES b/WIN32-NOTES
index 1d48f9f..81bed5d 100644
--- a/WIN32-NOTES
+++ b/WIN32-NOTES
@@ -158,5 +158,6 @@ Random notes (for porting new code):
Test status: (unittests failures)
cfgclient: LogConfigTest (1) (environment related???)
+ resolve: *imeout* (+crash in debug, double delete? temporary fix...)
b10-dhcp6: can't work as Windows has no loopback interface...
diff --git a/configure.ac b/configure.ac
index a94912e..b0f5f45 100644
--- a/configure.ac
+++ b/configure.ac
@@ -650,6 +650,7 @@ fi
#
if test "$gtest_path" != "no"
then
+ DISTCHECK_GTEST_CONFIGURE_FLAG="--with-gtest=\"$gtest_path\""
if test "$gtest_path" != "yes"; then
GTEST_PATHS=$gtest_path
if test -x "${gtest_path}/bin/gtest-config" ; then
@@ -690,8 +691,10 @@ else
GTEST_INCLUDES=
GTEST_LDFLAGS=
GTEST_LDADD=
+ DISTCHECK_GTEST_CONFIGURE_FLAG=
fi
AM_CONDITIONAL(HAVE_GTEST, test $gtest_path != "no")
+AC_SUBST(DISTCHECK_GTEST_CONFIGURE_FLAG)
AC_SUBST(GTEST_INCLUDES)
AC_SUBST(GTEST_LDFLAGS)
AC_SUBST(GTEST_LDADD)
@@ -811,20 +814,13 @@ AC_CONFIG_FILES([Makefile
src/bin/sockcreator/tests/Makefile
src/bin/xfrin/Makefile
src/bin/xfrin/tests/Makefile
+ src/bin/xfrin/tests/testdata/Makefile
src/bin/xfrout/Makefile
src/bin/xfrout/tests/Makefile
src/bin/zonemgr/Makefile
src/bin/zonemgr/tests/Makefile
src/bin/stats/Makefile
src/bin/stats/tests/Makefile
- src/bin/stats/tests/isc/Makefile
- src/bin/stats/tests/isc/cc/Makefile
- src/bin/stats/tests/isc/config/Makefile
- src/bin/stats/tests/isc/util/Makefile
- src/bin/stats/tests/isc/log/Makefile
- src/bin/stats/tests/isc/log_messages/Makefile
- src/bin/stats/tests/testdata/Makefile
- src/bin/stats/tests/http/Makefile
src/bin/usermgr/Makefile
src/bin/tests/Makefile
src/lib/Makefile
@@ -861,6 +857,8 @@ AC_CONFIG_FILES([Makefile
src/lib/python/isc/testutils/Makefile
src/lib/python/isc/bind10/Makefile
src/lib/python/isc/bind10/tests/Makefile
+ src/lib/python/isc/xfrin/Makefile
+ src/lib/python/isc/xfrin/tests/Makefile
src/lib/config/Makefile
src/lib/config/tests/Makefile
src/lib/config/tests/testdata/Makefile
diff --git a/doc/Doxyfile b/doc/Doxyfile
index 71b0738..8be9098 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -574,7 +574,7 @@ INPUT = ../src/lib/exceptions ../src/lib/cc \
../src/lib/log/compiler ../src/lib/asiolink/ ../src/lib/nsas \
../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \
../src/bin/sockcreator/ ../src/lib/util/ \
- ../src/lib/resolve ../src/lib/acl
+ ../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index c84ad06..239f235 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -1,5 +1,5 @@
EXTRA_DIST = bind10-guide.css
-EXTRA_DIST += bind10-guide.xml bind10-guide.html
+EXTRA_DIST += bind10-guide.xml bind10-guide.html bind10-guide.txt
EXTRA_DIST += bind10-messages.xml bind10-messages.html
# This is not a "man" manual, but reuse this for now for docbook.
@@ -15,6 +15,11 @@ bind10-guide.html: bind10-guide.xml
http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
$(srcdir)/bind10-guide.xml
+HTML2TXT = elinks -dump -no-numbering -no-references
+
+bind10-guide.txt: bind10-guide.html
+ $(HTML2TXT) $(srcdir)/bind10-guide.html > $@
+
bind10-messages.html: bind10-messages.xml
xsltproc --novalid --xinclude --nonet \
--path $(top_builddir)/doc \
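
The new bind10-guide.txt rule above delegates the HTML-to-text dump to
elinks. Purely as an illustration of that conversion step, here is a
minimal standard-library Python sketch; the script and its name are
assumptions for illustration and are not part of the BIND 10 build.

# Hypothetical helper, not used by the build: roughly approximates the
# "elinks -dump" step by stripping tags from bind10-guide.html.
import sys
from html.parser import HTMLParser

class TextDumper(HTMLParser):
    BLOCK_TAGS = {'p', 'div', 'li', 'h1', 'h2', 'h3', 'pre', 'br'}

    def __init__(self):
        super().__init__()
        self.chunks = []

    def handle_starttag(self, tag, attrs):
        # Start block-level elements on a new line.
        if tag in self.BLOCK_TAGS:
            self.chunks.append('\n')

    def handle_data(self, data):
        self.chunks.append(data)

    def text(self):
        # Drop the empty lines left behind by the markup.
        lines = ''.join(self.chunks).splitlines()
        return '\n'.join(line.rstrip() for line in lines if line.strip())

if __name__ == '__main__':
    dumper = TextDumper()
    # The guide HTML declares ISO-8859-1 (see the diff below).
    with open(sys.argv[1], encoding='iso-8859-1') as f:
        dumper.feed(f.read())
    print(dumper.text())

Something like "python3 htmldump.py bind10-guide.html > bind10-guide.txt"
(script name hypothetical) would produce a rough plain-text dump.
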
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index a9a4cc6..97ffb84 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,12 +1,14 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
lass="releaseinfo">This is the reference guide for BIND 10 version
20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
This is the reference guide for BIND 10 version 20110809.
- The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><di
v class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ The most up-to-date version of this document (in PDF, HTML,
+ and plain text formats), along with other documents for
+ BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+ </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b
10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfe
rs</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></dd></dl><
/div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
@@ -18,7 +20,7 @@
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
which also provides forwarding.
- </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460181"></a>Supported Platforms</h2></div></div></div><p>
+ </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229451238"></a>Supported Platforms</h2></div></div></div><p>
BIND 10 builds have been tested on Debian GNU/Linux 5,
Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
Linux 5.3.
@@ -28,7 +30,7 @@
It is planned for BIND 10 to build, install and run on
Windows and standard Unix-type platforms.
- </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460208"></a>Required Software</h2></div></div></div><p>
+ </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229451265"></a>Required Software</h2></div></div></div><p>
BIND 10 requires Python 3.1. Later versions may work, but Python
3.1 is the minimum version which will work.
</p><p>
@@ -138,7 +140,7 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229445988"></a>Building Requirements</h2></div></div></div><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229436567"></a>Building Requirements</h2></div></div></div><p>
In addition to the run-time requirements, building BIND 10
from source code requires various development include headers.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -202,14 +204,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446178"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436859"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446197"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436878"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -243,7 +245,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446258"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436939"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -274,16 +276,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446356"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437037"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446371"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437052"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446394"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437076"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -505,12 +507,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229446979"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437660"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -530,7 +532,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447044"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437725"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -544,7 +546,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447074"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437755"></a>Loading Master Zones Files</h2></div></div></div><p>
RFC 1035 style DNS master zone files may imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -573,28 +575,69 @@ This may be a temporary setting until then.
If you reload a zone already existing in the database,
all records from that prior zone disappear and a whole new set
appears.
- </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><p>
+ </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
Incoming zones are transferred using the <span class="command"><strong>b10-xfrin</strong></span>
process which is started by <span class="command"><strong>bind10</strong></span>.
- When received, the zone is stored in the BIND 10
- data store, and its records can be served by
+ When received, the zone is stored in the corresponding BIND 10
+ data source, and its records can be served by
<span class="command"><strong>b10-auth</strong></span>.
In combination with <span class="command"><strong>b10-zonemgr</strong></span> (for
automated SOA checks), this allows the BIND 10 server to
provide <span class="quote">“<span class="quote">secondary</span>”</span> service.
+ </p><p>
+ The <span class="command"><strong>b10-xfrin</strong></span> process supports both AXFR and
+ IXFR. Due to some implementation limitations of the current
+ development release, however, it only tries AXFR by default,
+ and care should be taken to enable IXFR.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
-
-
-
- </p></div><p>
- To manually trigger a zone transfer to retrieve a remote zone,
- you may use the <span class="command"><strong>bindctl</strong></span> utility.
- For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
-
- </p><pre class="screen">> <strong class="userinput"><code>Xfrin retransfer zone_name="<code class="option">foo.example.org</code>" master=<code class="option">192.0.2.99</code></code></strong></pre><p>
- </p></div><div class="chapter" title="Chapter 10. Outbound Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrout"></a>Chapter 10. Outbound Zone Transfers</h2></div></div></div><p>
+ In the current development release of BIND 10, incoming zone
+ transfers are only available for SQLite3-based data sources,
+ that is, they don't work for an in-memory data source.
+ </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437989"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
+ In practice, you need to specify a list of secondary zones to
+ enable incoming zone transfers for these zones (you can still
+ trigger a zone transfer manually, without a prior configuration
+ (see below)).
+ </p><p>
+ For example, to enable zone transfers for a zone named "example.com"
+ (whose master address is assumed to be 2001:db8::53 here),
+ run the following at the <span class="command"><strong>bindctl</strong></span> prompt:
+
+ </p><pre class="screen">> <strong class="userinput"><code>config add Xfrin/zones</code></strong>
+> <strong class="userinput"><code>config set Xfrin/zones[0]/name "<code class="option">example.com</code>"</code></strong>
+> <strong class="userinput"><code>config set Xfrin/zones[0]/master_addr "<code class="option">2001:db8::53</code>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+
+ (We assume there has been no zone configuration before).
+ </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438027"></a>Enabling IXFR</h2></div></div></div><p>
+ As noted above, <span class="command"><strong>b10-xfrin</strong></span> uses AXFR for
+ zone transfers by default. To enable IXFR for zone transfers
+ for a particular zone, set the <strong class="userinput"><code>use_ixfr</code></strong>
+ configuration parameter to <strong class="userinput"><code>true</code></strong>.
+ In the configuration sequence shown above, you'll need
+ to add the following before performing <strong class="userinput"><code>commit</code></strong>:
+ </p><pre class="screen">> <strong class="userinput"><code>config set Xfrin/zones[0]/use_ixfr true</code></strong></pre><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ One reason IXFR is disabled by default in the current
+ release is that it does not support automatic fallback from IXFR to
+ AXFR when it encounters a primary server that doesn't support
+ outbound IXFR (and not many existing implementations support
+ it). Another, related reason is that it does not fall back to AXFR
+ even if it has no knowledge of the zone (such as the very first
+ time the secondary server is set up). IXFR requires the
+ "current version" of the zone, so it cannot work
+ in that situation and AXFR is the only workable choice.
+ The current release of <span class="command"><strong>b10-xfrin</strong></span> does not
+ make this selection automatically.
+ These features will be implemented in a near-future
+ version, at which point we will enable IXFR by default.
+ </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438069"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
+ To manually trigger a zone transfer to retrieve a remote zone,
+ you may use the <span class="command"><strong>bindctl</strong></span> utility.
+ For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
+
+ </p><pre class="screen">> <strong class="userinput"><code>Xfrin retransfer zone_name="<code class="option">foo.example.org</code>" master=<code class="option">192.0.2.99</code></code></strong></pre><p>
+ </p></div></div><div class="chapter" title="Chapter 10. Outbound Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrout"></a>Chapter 10. Outbound Zone Transfers</h2></div></div></div><p>
The <span class="command"><strong>b10-xfrout</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
When the <span class="command"><strong>b10-auth</strong></span> authoritative DNS server
@@ -622,7 +665,7 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -656,7 +699,7 @@ This may be a temporary setting until then.
</pre><p>
</p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
- Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447556"></a>Access Control</h2></div></div></div><p>
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438327"></a>Access Control</h2></div></div></div><p>
By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
DNS queries from the localhost (127.0.0.1 and ::1).
The <code class="option">Resolver/query_acl</code> configuration may
@@ -689,7 +732,7 @@ This may be a temporary setting until then.
</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
- syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447671"></a>Forwarding</h2></div></div></div><p>
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438512"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -717,27 +760,33 @@ This may be a temporary setting until then.
</p><p>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <span class="command"><strong>bindctl</strong></span>:
</p><pre class="screen">
> <strong class="userinput"><code>Stats show</code></strong>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</pre><p>
- </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447788"></a>Logging configuration</h2></div></div></div><p>
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438628"></a>Logging configuration</h2></div></div></div><p>
The logging system in BIND 10 is configured through the
Logging module. All BIND 10 modules will look at the
@@ -746,7 +795,7 @@ This may be a temporary setting until then.
- </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229447799"></a>Loggers</h3></div></div></div><p>
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229438638"></a>Loggers</h3></div></div></div><p>
Within BIND 10, a message is logged through a component
called a "logger". Different parts of BIND 10 log messages
@@ -767,7 +816,7 @@ This may be a temporary setting until then.
(what to log), and the <code class="option">output_options</code>
(where to log).
- </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447824"></a>name (string)</h4></div></div></div><p>
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229438663"></a>name (string)</h4></div></div></div><p>
Each logger in the system has a name, the name being that
of the component using it to log messages. For instance,
if you want to configure logging for the resolver module,
@@ -840,7 +889,7 @@ This may be a temporary setting until then.
<span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
- </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447923"></a>severity (string)</h4></div></div></div><p>
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439035"></a>severity (string)</h4></div></div></div><p>
This specifies the category of messages logged.
Each message is logged with an associated severity which
@@ -856,7 +905,7 @@ This may be a temporary setting until then.
- </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447973"></a>output_options (list)</h4></div></div></div><p>
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439086"></a>output_options (list)</h4></div></div></div><p>
Each logger can have zero or more
<code class="option">output_options</code>. These specify where log
@@ -866,7 +915,7 @@ This may be a temporary setting until then.
The other options for a logger are:
- </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447990"></a>debuglevel (integer)</h4></div></div></div><p>
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439102"></a>debuglevel (integer)</h4></div></div></div><p>
When a logger's severity is set to DEBUG, this value
specifies what debug messages should be printed. It ranges
@@ -875,7 +924,7 @@ This may be a temporary setting until then.
If severity for the logger is not DEBUG, this value is ignored.
- </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448005"></a>additive (true or false)</h4></div></div></div><p>
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439117"></a>additive (true or false)</h4></div></div></div><p>
If this is true, the <code class="option">output_options</code> from
the parent will be used. For example, if there are two
@@ -889,18 +938,18 @@ This may be a temporary setting until then.
- </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448040"></a>Output Options</h3></div></div></div><p>
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439154"></a>Output Options</h3></div></div></div><p>
The main settings for an output option are the
<code class="option">destination</code> and a value called
<code class="option">output</code>, the meaning of which depends on
the destination that is set.
- </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448056"></a>destination (string)</h4></div></div></div><p>
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439169"></a>destination (string)</h4></div></div></div><p>
The destination is the type of output. It can be one of:
- </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448088"></a>output (string)</h4></div></div></div><p>
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439201"></a>output (string)</h4></div></div></div><p>
Depending on what is set as the output destination, this
value is interpreted as follows:
@@ -922,12 +971,12 @@ This may be a temporary setting until then.
The other options for <code class="option">output_options</code> are:
- </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448172"></a>flush (true of false)</h5></div></div></div><p>
+ </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439286"></a>flush (true of false)</h5></div></div></div><p>
Flush buffers after each log message. Doing this will
reduce performance but will ensure that if the program
terminates abnormally, all messages up to the point of
termination are output.
- </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448182"></a>maxsize (integer)</h5></div></div></div><p>
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439296"></a>maxsize (integer)</h5></div></div></div><p>
Only relevant when destination is file, this is maximum
file size of output files in bytes. When the maximum
size is reached, the file is renamed and a new file opened.
@@ -936,11 +985,11 @@ This may be a temporary setting until then.
etc.)
</p><p>
If this is 0, no maximum file size is used.
- </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448196"></a>maxver (integer)</h5></div></div></div><p>
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439308"></a>maxver (integer)</h5></div></div></div><p>
Maximum number of old log files to keep around when
rolling the output file. Only relevant when
<code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
- </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448215"></a>Example session</h3></div></div></div><p>
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439328"></a>Example session</h3></div></div></div><p>
In this example we want to set the global logging to
write to the file <code class="filename">/var/log/my_bind10.log</code>,
@@ -1101,7 +1150,7 @@ Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
And every module will now be using the values from the
logger named <span class="quote">“<span class="quote">*</span>”</span>.
- </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229448428"></a>Logging Message Format</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439609"></a>Logging Message Format</h2></div></div></div><p>
Each message written by BIND 10 to the configured logging
destinations comprises a number of components that identify
the origin of the message and, if the message indicates
diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt
new file mode 100644
index 0000000..619d56f
--- /dev/null
+++ b/doc/guide/bind10-guide.txt
@@ -0,0 +1,1201 @@
+ BIND 10 Guide
+
+Administrator Reference for BIND 10
+
+ This is the reference guide for BIND 10 version 20110809.
+
+ Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
+
+ Abstract
+
+ BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems
+ Consortium (ISC). It includes DNS libraries and modular components for
+ controlling authoritative and recursive DNS servers.
+
+ This is the reference guide for BIND 10 version 20110809. The most
+ up-to-date version of this document (in PDF, HTML, and plain text
+ formats), along with other documents for BIND 10, can be found at
+ http://bind10.isc.org/docs.
+
+ --------------------------------------------------------------------------
+
+ Table of Contents
+
+ 1. Introduction
+
+ Supported Platforms
+
+ Required Software
+
+ Starting and Stopping the Server
+
+ Managing BIND 10
+
+ 2. Installation
+
+ Building Requirements
+
+ Quick start
+
+ Installation from source
+
+ Download Tar File
+
+ Retrieve from Git
+
+ Configure before the build
+
+ Build
+
+ Install
+
+ Install Hierarchy
+
+ 3. Starting BIND10 with bind10
+
+ Starting BIND 10
+
+ 4. Command channel
+
+ 5. Configuration manager
+
+ 6. Remote control daemon
+
+ Configuration specification for b10-cmdctl
+
+ 7. Control and configure user interface
+
+ 8. Authoritative Server
+
+ Server Configurations
+
+ Data Source Backends
+
+ Loading Master Zones Files
+
+ 9. Incoming Zone Transfers
+
+ Configuration for Incoming Zone Transfers
+
+ Enabling IXFR
+
+ Trigger an Incoming Zone Transfer Manually
+
+ 10. Outbound Zone Transfers
+
+ 11. Secondary Manager
+
+ 12. Recursive Name Server
+
+ Access Control
+
+ Forwarding
+
+ 13. Statistics
+
+ 14. Logging
+
+ Logging configuration
+
+ Loggers
+
+ Output Options
+
+ Example session
+
+ Logging Message Format
+
+Chapter 1. Introduction
+
+ Table of Contents
+
+ Supported Platforms
+
+ Required Software
+
+ Starting and Stopping the Server
+
+ Managing BIND 10
+
+ BIND is the popular implementation of a DNS server, developer interfaces,
+ and DNS tools. BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++
+ and Python and provides a modular environment for serving and maintaining
+ DNS.
+
+ Note
+
+ This guide covers the experimental prototype of BIND 10 version 20110809.
+
+ Note
+
+ BIND 10 provides an EDNS0- and DNSSEC-capable authoritative DNS server and
+ a caching recursive name server which also provides forwarding.
+
+Supported Platforms
+
+ BIND 10 builds have been tested on Debian GNU/Linux 5, Ubuntu 9.10, NetBSD
+ 5, Solaris 10, FreeBSD 7 and 8, and CentOS Linux 5.3. It has been tested
+ on Sparc, i386, and amd64 hardware platforms. It is planned for BIND 10 to
+ build, install and run on Windows and standard Unix-type platforms.
+
+Required Software
+
+ BIND 10 requires Python 3.1; later versions may work, but Python 3.1 is
+ the minimum version that will work.
+
+ BIND 10 uses the Botan crypto library for C++. It requires at least Botan
+ version 1.8.
+
+ BIND 10 uses the log4cplus C++ logging library. It requires at least
+ log4cplus version 1.0.3.
+
+ The authoritative server requires SQLite 3.3.9 or newer. The b10-xfrin,
+ b10-xfrout, and b10-zonemgr modules require the libpython3 library and the
+ Python _sqlite3.so module.
+
+ Note
+
+ Some operating systems do not provide these dependencies in their default
+ installation nor standard packages collections. You may need to install
+ them separately.
+
+Starting and Stopping the Server
+
+ BIND 10 is modular. Part of this modularity is accomplished using multiple
+ cooperating processes which, together, provide the server functionality.
+ This is a change from the previous generation of BIND software, which used
+ a single process.
+
+ At first, running many different processes may seem confusing. However,
+ these processes are started, stopped, and maintained by a single command,
+ bind10. This command starts a master process which will start other
+ processes as needed. The processes started by the bind10 command have
+ names starting with "b10-", including:
+
+ o b10-msgq -- Message bus daemon. This process coordinates communication
+ between all of the other BIND 10 processes.
+ o b10-auth -- Authoritative DNS server. This process serves DNS
+ requests.
+ o b10-cfgmgr -- Configuration manager. This process maintains all of the
+ configuration for BIND 10.
+ o b10-cmdctl -- Command and control service. This process allows
+ external control of the BIND 10 system.
+ o b10-resolver -- Recursive name server. This process handles incoming
+ queries.
+ o b10-stats -- Statistics collection daemon. This process collects and
+ reports statistics data.
+ o b10-xfrin -- Incoming zone transfer service. This process is used to
+ transfer a new copy of a zone into BIND 10, when acting as a secondary
+ server.
+ o b10-xfrout -- Outgoing zone transfer service. This process is used to
+ handle transfer requests to send a local zone to a remote secondary
+ server, when acting as a master server.
+ o b10-zonemgr -- Secondary manager. This process keeps track of timers
+ and other necessary information for BIND 10 to act as a slave server.
+
+ These are run automatically by bind10 and do not need to be run manually.
+
+Managing BIND 10
+
+ Once BIND 10 is running, a few commands are used to interact directly with
+ the system:
+
+ o bindctl -- interactive administration interface. This is a
+ command-line tool which allows an administrator to control BIND 10.
+ o b10-loadzone -- zone file loader. This tool will load standard
+ masterfile-format zone files into BIND 10.
+ o b10-cmdctl-usermgr -- user access control. This tool allows an
+ administrator to authorize additional users to manage BIND 10.
+
+ The tools and modules are covered in full detail in this guide. In
+ addition, manual pages are also provided in the default installation.
+
+ BIND 10 also provides libraries and programmer interfaces for C++ and
+ Python for the message bus, configuration backend, and, of course, DNS.
+ These include detailed developer documentation and code examples.
+
+Chapter 2. Installation
+
+ Table of Contents
+
+ Building Requirements
+
+ Quick start
+
+ Installation from source
+
+ Download Tar File
+
+ Retrieve from Git
+
+ Configure before the build
+
+ Build
+
+ Install
+
+ Install Hierarchy
+
+Building Requirements
+
+ In addition to the run-time requirements, building BIND 10 from source
+ code requires various development include headers.
+
+ Note
+
+ Some operating systems have split their distribution packages into a
+ run-time and a development package. You will need to install the
+ development package versions, which include header files and libraries, to
+ build BIND 10 from source code.
+
+ Building from source code requires the Boost build-time headers. At least
+ Boost version 1.35 is required.
+
+ To build BIND 10, also install the Botan (at least version 1.8) and the
+ log4cplus (at least version 1.0.3) development include headers.
+
+ The Python library and the Python _sqlite3 module are required to enable
+ Xfrout and Xfrin support.
+
+ Note
+
+ The Python related libraries and modules need to be built for Python 3.1.
+
+ Building BIND 10 also requires a C++ compiler and standard development
+ headers, make, and pkg-config. BIND 10 builds have been tested with GCC
+ g++ 3.4.3, 4.1.2, 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++
+ 5.10.
+
+Quick start
+
+ Note
+
+ This quickly covers the standard steps for installing and deploying BIND
+ 10 as an authoritative name server using its defaults. For
+ troubleshooting, full customizations and further details, see the
+ respective chapters in the BIND 10 guide.
+
+ To quickly get started with BIND 10, follow these steps.
+
+ 1. Install required build dependencies.
+ 2. Download the BIND 10 source tar file from
+ ftp://ftp.isc.org/isc/bind10/.
+ 3. Extract the tar file:
+
+ $ gzcat bind10-VERSION.tar.gz | tar -xvf -
+
+ 4. Go into the source and run configure:
+
+ $ cd bind10-VERSION
+ $ ./configure
+
+ 5. Build it:
+
+ $ make
+
+ 6. Install it (to default /usr/local):
+
+ $ make install
+
+ 7. Start the server:
+
+ $ /usr/local/sbin/bind10
+
+ 8. Test it; for example:
+
+ $ dig @127.0.0.1 -c CH -t TXT authors.bind
+
+ 9. Load desired zone file(s), for example:
+
+ $ b10-loadzone your.zone.example.org
+
+ 10. Test the new zone.
+
+Installation from source
+
+ BIND 10 is open source software written in C++ and Python. It is freely
+ available in source code form from ISC via the Git code revision control
+ system or as a downloadable tar file. It may also be available in
+ pre-compiled ready-to-use packages from operating system vendors.
+
+ Download Tar File
+
+ Downloading a release tar file is the recommended method to obtain the
+ source code.
+
+ The BIND 10 releases are available as tar file downloads from
+ ftp://ftp.isc.org/isc/bind10/. Periodic development snapshots may also be
+ available.
+
+ Retrieve from Git
+
+ Downloading this "bleeding edge" code is recommended only for developers
+ or advanced users. Using development code in a production environment is
+ not recommended.
+
+ Note
+
+ When using source code retrieved via Git, additional software will be
+ required: automake (v1.11 or newer), libtoolize, and autoconf (2.59 or
+ newer). These may need to be installed.
+
+ The latest development code, including temporary experiments and
+ un-reviewed code, is available via the BIND 10 code revision control
+ system. This is powered by Git and all the BIND 10 development is public.
+ The leading development is done in the "master" branch.
+
+ The code can be checked out from git://bind10.isc.org/bind10; for example:
+
+ $ git clone git://bind10.isc.org/bind10
+
+ The code checked out from the version control system does not include
+ the generated configure script, Makefile.in files, or the related
+ configure files. They can be created by running autoreconf with
+ the --install switch. This will run autoconf, aclocal, libtoolize,
+ autoheader, automake, and related commands.
+
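+ For example, a minimal invocation, run from the top of the checked-out
+ source tree as described above:
+
+ $ autoreconf --install
+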
+ Configure before the build
+
+ BIND 10 uses the GNU Build System to discover build environment details.
+ To generate the makefiles using the defaults, simply run:
+
+ $ ./configure
+
+ Run ./configure with the --help switch to view the different options. The
+ commonly-used options are:
+
+ --prefix
+ Define the installation location (the default is /usr/local/).
+
+ --with-boost-include
+ Define the path to find the Boost headers.
+
+ --with-pythonpath
+ Define the path to Python 3.1 if it is not in the standard
+ execution path.
+
+ --with-gtest
+ Enable building the C++ Unit Tests using the Google Tests
+ framework. Optionally this can define the path to the gtest header
+ files and library.
+
+ For example, the following configures it to find the Boost headers, find
+ the Python interpreter, and set the installation location:
+
+ $ ./configure \
+ --with-boost-include=/usr/pkg/include \
+ --with-pythonpath=/usr/pkg/bin/python3.1 \
+ --prefix=/opt/bind10
+
+ If the configure fails, it may be due to missing or old dependencies.
+
+ Build
+
+ After the configure step is complete, to build the executables from the
+ C++ code and prepare the Python scripts, run:
+
+ $ make
+
+ Install
+
+ To install the BIND 10 executables, support files, and documentation, run:
+
+ $ make install
+
+ Note
+
+ The install step may require superuser privileges.
+
+ Install Hierarchy
+
+ The following is the layout of the complete BIND 10 installation:
+
+ o bin/ -- general tools and diagnostic clients.
+ o etc/bind10-devel/ -- configuration files.
+ o lib/ -- libraries and python modules.
+ o libexec/bind10-devel/ -- executables that a user wouldn't normally run
+ directly and are not run independently. These are the BIND 10 modules
+ which are daemons started by the bind10 tool.
+ o sbin/ -- commands used by the system administrator.
+ o share/bind10-devel/ -- configuration specifications.
+ o share/man/ -- manual pages (online documentation).
+ o var/bind10-devel/ -- data source and configuration databases.
+
+Chapter 3. Starting BIND10 with bind10
+
+ Table of Contents
+
+ Starting BIND 10
+
+ BIND 10 provides the bind10 command which starts up the required
+ processes. bind10 will also restart processes that exit unexpectedly. This
+ is the only command needed to start the BIND 10 system.
+
+ After starting the b10-msgq communications channel, bind10 connects to it,
+ runs the configuration manager, and reads its own configuration. Then it
+ starts the other modules.
+
+ The b10-msgq and b10-cfgmgr services make up the core. The b10-msgq daemon
+ provides the communication channel between every part of the system. The
+ b10-cfgmgr daemon is always needed by every module, if only to send
+ information about itself somewhere, but more importantly to ask about
+ its own settings and about other modules.
+ will also start up b10-cmdctl for admins to communicate with the system,
+ b10-auth for authoritative DNS service or b10-resolver for recursive name
+ service, b10-stats for statistics collection, b10-xfrin for inbound DNS
+ zone transfers, b10-xfrout for outbound DNS zone transfers, and
+ b10-zonemgr for secondary service.
+
+Starting BIND 10
+
+ To start the BIND 10 service, simply run bind10. Run it with the --verbose
+ switch to get additional debugging or diagnostic output.
+
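+ For example, assuming the default installation prefix used in the quick
+ start above, the server could be started in verbose mode with:
+
+ $ /usr/local/sbin/bind10 --verbose
+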
+ Note
+
+ If the setproctitle Python module is detected at start up, the process
+ names for the Python-based daemons will be renamed to better identify them
+ instead of just "python". This is not needed on some operating systems.
+
+Chapter 4. Command channel
+
+ The BIND 10 components use the b10-msgq message routing daemon to
+ communicate with other BIND 10 components. The b10-msgq implements what is
+ called the "Command Channel". Processes intercommunicate by sending
+ messages on the command channel. Example messages include shutdown, get
+ configurations, and set configurations. This Command Channel is not used
+ for DNS message passing. It is used only to control and monitor the BIND
+ 10 system.
+
+ Administrators do not communicate directly with the b10-msgq daemon. By
+ default, BIND 10 uses port 9912 for the b10-msgq service. It listens on
+ 127.0.0.1.
+
+Chapter 5. Configuration manager
+
+ The configuration manager, b10-cfgmgr, handles all BIND 10 system
+ configuration. It provides persistent storage for configuration, and
+ notifies running modules of configuration changes.
+
+ The b10-auth and b10-xfrin daemons and other components receive their
+ configurations from the configuration manager over the b10-msgq command
+ channel.
+
+ The administrator doesn't connect to it directly, but uses a user
+ interface to communicate with the configuration manager via b10-cmdctl's
+ REST-ful interface. b10-cmdctl is covered in Chapter 6, Remote control
+ daemon.
+
+ Note
+
+ The development prototype release only provides the bindctl as a user
+ interface to b10-cmdctl. Upcoming releases will provide another
+ interactive command-line interface and a web-based interface.
+
+ The b10-cfgmgr daemon can send all specifications and all current settings
+ to the bindctl client (via b10-cmdctl).
+
+ b10-cfgmgr relays configurations received from b10-cmdctl to the
+ appropriate modules.
+
+ The stored configuration file is at
+ /usr/local/var/bind10-devel/b10-config.db. (The full path is what was
+ defined at build configure time for --localstatedir. The default is
+ /usr/local/var/.) The format is loosely based on JSON and is directly
+ parseable python, but this may change in a future version. This
+ configuration data file is not manually edited by the administrator.
+
+ The configuration manager does not have any command line arguments.
+ Normally it is not started manually, but is automatically started using
+ the bind10 master process (as covered in Chapter 3, Starting BIND10 with
+ bind10).
+
+Chapter 6. Remote control daemon
+
+ Table of Contents
+
+ Configuration specification for b10-cmdctl
+
+ b10-cmdctl is the gateway between administrators and the BIND 10 system.
+ It is an HTTPS server that uses standard HTTP Digest Authentication for
+ username and password validation. It provides a REST-ful interface for
+ accessing and controlling BIND 10.
+
+ When b10-cmdctl starts, it first asks b10-cfgmgr about what modules are
+ running and what their configuration is (over the b10-msgq channel). Then
+ it will start listening on HTTPS for clients -- the user interface -- such
+ as bindctl.
+
+ b10-cmdctl directly sends commands (received from the user interface) to
+ the specified component. Configuration changes are actually commands to
+ b10-cfgmgr, so they are sent there.
+
+ The HTTPS server requires a private key, such as an RSA PRIVATE KEY. The
+ default location is at /usr/local/etc/bind10-devel/cmdctl-keyfile.pem. (A
+ sample key is at /usr/local/share/bind10-devel/cmdctl-keyfile.pem.) It
+ also uses a certificate located at
+ /usr/local/etc/bind10-devel/cmdctl-certfile.pem. (A sample certificate is
+ at /usr/local/share/bind10-devel/cmdctl-certfile.pem.) This may be a
+ self-signed certificate or purchased from a certification authority.
+
+ Note
+
+ The HTTPS server doesn't support a certificate request from a client (at
+ this time). The b10-cmdctl daemon does not provide a public service. If
+ any client wants to control BIND 10, then a certificate needs to be first
+ received from the BIND 10 administrator. The BIND 10 installation provides
+ a sample PEM bundle that matches the sample key and certificate.
+
+ The b10-cmdctl daemon also requires the user account file located at
+ /usr/local/etc/bind10-devel/cmdctl-accounts.csv. This comma-delimited file
+ lists the accounts with a user name, hashed password, and salt. (A sample
+ file is at /usr/local/share/bind10-devel/cmdctl-accounts.csv. It contains
+ the user named "root" with the password "bind10".)
+
+ The administrator may create a user account with the b10-cmdctl-usermgr
+ tool.
+
+ By default, the HTTPS server listens on localhost port 8080. The port
+ can be set by using the --port command line option. The address to listen
+ on can be set using the --address command line argument. Each HTTPS
+ connection is stateless and times out after 1200 seconds by default. This
+ can be redefined by using the --idle-timeout command line argument.
+
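+ As an illustrative sketch only (b10-cmdctl is normally started by bind10,
+ and the values shown are simply the documented defaults), the options
+ above could be combined as:
+
+ $ b10-cmdctl --address 127.0.0.1 --port 8080 --idle-timeout 1200
+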
+Configuration specification for b10-cmdctl
+
+ The configuration items for b10-cmdctl are: key_file, cert_file, and
+ accounts_file.
+
+ The control commands are: print_settings and shutdown.
+
+Chapter 7. Control and configure user interface
+
+ Note
+
+ For this development prototype release, bindctl is the only user
+ interface. It is expected that upcoming releases will provide another
+ interactive command-line interface and a web-based interface for
+ controlling and configuring BIND 10.
+
+ The bindctl tool provides an interactive prompt for configuring,
+ controlling, and querying the BIND 10 components. It communicates directly
+ with a REST-ful interface over HTTPS provided by b10-cmdctl. It doesn't
+ communicate with any other components directly.
+
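+ For example, a minimal session (bindctl first authenticates against
+ b10-cmdctl, as the "login success" line in the example session of the
+ Logging chapter shows) starts the prompt and inspects a module's
+ configuration using the same config commands used throughout this guide:
+
+ $ bindctl
+ > config show Logging
+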
+ Configuration changes are actually commands to b10-cfgmgr. So when bindctl
+ sends a configuration, it is sent to b10-cmdctl (over an HTTPS connection);
+ then b10-cmdctl sends the command (over a b10-msgq command channel) to
+ b10-cfgmgr which then stores the details and relays (over a b10-msgq
+ command channel) the configuration on to the specified module.
+
+Chapter 8. Authoritative Server
+
+ Table of Contents
+
+ Server Configurations
+
+ Data Source Backends
+
+ Loading Master Zones Files
+
+ The b10-auth is the authoritative DNS server. It supports EDNS0 and
+ DNSSEC. It supports IPv6. Normally it is started by the bind10 master
+ process.
+
+Server Configurations
+
+ b10-auth is configured via the b10-cfgmgr configuration manager. The
+ module name is "Auth". The configuration data item is:
+
+ database_file
+ This is an optional string to define the path to find the SQLite3
+ database file. Note: Later the DNS server will use various data
+ source backends. This may be a temporary setting until then.
+
+ The configuration command is:
+
+ shutdown
+ Stop the authoritative DNS server.
+
+Data Source Backends
+
+ Note
+
+ For the development prototype release, b10-auth supports a SQLite3 data
+ source backend and an in-memory data source backend. Upcoming versions will
+ be able to use multiple different data sources, such as MySQL and Berkeley
+ DB.
+
+ By default, the SQLite3 backend uses the data file located at
+ /usr/local/var/bind10-devel/zone.sqlite3. (The full path is what was
+ defined at build configure time for --localstatedir. The default is
+ /usr/local/var/.) This data file location may be changed by defining the
+ "database_file" configuration.
+
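+ For example, to point b10-auth at a different database file (the path
+ below is only illustrative), run the following at the bindctl prompt:
+
+ > config set Auth/database_file "/tmp/zone.sqlite3"
+ > config commit
+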
+Loading Master Zones Files
+
+ RFC 1035 style DNS master zone files may be imported into a BIND 10 data
+ source by using the b10-loadzone utility.
+
+ b10-loadzone supports the following special directives (control entries):
+
+ $INCLUDE
+ Loads an additional zone file. This may be recursive.
+
+ $ORIGIN
+ Defines the origin used for relative domain names.
+
+ $TTL
+ Defines the time-to-live value used for following records that
+ don't include a TTL.
+
+ The -o argument may be used to define the default origin for loaded zone
+ file records.
+
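+ For example, a hypothetical invocation that loads the file
+ example.org.zone with an explicit origin would be:
+
+ $ b10-loadzone -o example.org example.org.zone
+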
+ Note
+
+ In the development prototype release, only the SQLite3 back end is used.
+ By default, it stores the zone data in
+ /usr/local/var/bind10-devel/zone.sqlite3 unless the -d switch is used to
+ set the database filename. Multiple zones are stored in a single SQLite3
+ zone database.
+
+ If you reload a zone already existing in the database, all records from
+ that prior zone disappear and a whole new set appears.
+
+Chapter 9. Incoming Zone Transfers
+
+ Table of Contents
+
+ Configuration for Incoming Zone Transfers
+
+ Enabling IXFR
+
+ Trigger an Incoming Zone Transfer Manually
+
+ Incoming zones are transferred using the b10-xfrin process which is
+ started by bind10. When received, the zone is stored in the corresponding
+ BIND 10 data source, and its records can be served by b10-auth. In
+ combination with b10-zonemgr (for automated SOA checks), this allows the
+ BIND 10 server to provide "secondary" service.
+
+ The b10-xfrin process supports both AXFR and IXFR. Due to some
+ implementation limitations of the current development release, however, it
+ only tries AXFR by default, and IXFR must be enabled explicitly.
+
+ Note
+
+ In the current development release of BIND 10, incoming zone transfers are
+ only available for SQLite3-based data sources, that is, they don't work
+ for an in-memory data source.
+
+Configuration for Incoming Zone Transfers
+
+ In practice, you need to specify a list of secondary zones to enable
+ incoming zone transfers for these zones (you can still trigger a zone
+ transfer manually without prior configuration; see below).
+
+ For example, to enable zone transfers for a zone named "example.com"
+ (whose master address is assumed to be 2001:db8::53 here), run the
+ following at the bindctl prompt:
+
+ > config add Xfrin/zones
+ > config set Xfrin/zones[0]/name "example.com"
+ > config set Xfrin/zones[0]/master_addr "2001:db8::53"
+ > config commit
+
+ (We assume there has been no zone configuration before).
+
+Enabling IXFR
+
+ As noted above, b10-xfrin uses AXFR for zone transfers by default. To
+ enable IXFR for zone transfers for a particular zone, set the use_ixfr
+ configuration parameter to true. In the configuration sequence shown
+ above, you'll need to add the following before performing commit:
+
+ > config set Xfrin/zones[0]/use_ixfr true
+
+ Note
+
+ One reason IXFR is disabled by default in the current release is that it
+ does not support automatic fallback from IXFR to AXFR when it encounters
+ a primary server that doesn't support outbound IXFR (and not many
+ existing implementations support it). Another, related reason is that it
+ does not fall back to AXFR even if it has no knowledge of the zone (such
+ as the very first time the secondary server is set up). IXFR requires the
+ "current version" of the zone, so it cannot work in that situation and
+ AXFR is the only workable choice. The current release of b10-xfrin does
+ not make this selection automatically. These features will be implemented
+ in a near-future version, at which point we will enable IXFR by default.
+
+Trigger an Incoming Zone Transfer Manually
+
+ To manually trigger a zone transfer to retrieve a remote zone, you may use
+ the bindctl utility. For example, at the bindctl prompt run:
+
+ > Xfrin retransfer zone_name="foo.example.org" master=192.0.2.99
+
+Chapter 10. Outbound Zone Transfers
+
+ The b10-xfrout process is started by bind10. When the b10-auth
+ authoritative DNS server receives an AXFR request, b10-xfrout sends the
+ zone. This is used to provide master DNS service, sharing zones with
+ secondary name servers. b10-xfrout is also used to send NOTIFY
+ messages to slaves.
+
+ Note
+
+ The current development release of BIND 10 only supports AXFR for
+ outbound transfers. (Outbound IXFR is not supported.) Access control is
+ not yet provided.
+
+Chapter 11. Secondary Manager
+
+ The b10-zonemgr process is started by bind10. It keeps track of SOA
+ refresh, retry, and expire timers and other details for BIND 10 to perform
+ as a slave. When the b10-auth authoritative DNS server receives a NOTIFY
+ message, b10-zonemgr may tell b10-xfrin to do a refresh to start an
+ inbound zone transfer. The secondary manager resets its counters when a
+ new zone is transferred in.
+
+ Note
+
+ Access control (such as allowing notifies) is not yet provided. The
+ primary/secondary service is not yet complete.
+
+Chapter 12. Recursive Name Server
+
+ Table of Contents
+
+ Access Control
+
+ Forwarding
+
+ The b10-resolver process is started by bind10.
+
+ The main bind10 process can be configured to run the authoritative
+ server, the resolver, or both. By default, it starts the authoritative
+ service. You may change this using bindctl, for example:
+
+ > config set Boss/start_auth false
+ > config set Boss/start_resolver true
+ > config commit
+
+ The master bind10 will stop and start the desired services.
+
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1. The
+ following example shows how it can be configured to listen on an
+ additional address (and port):
+
+ > config add Resolver/listen_on
+ > config set Resolver/listen_on[2]/address "192.168.1.1"
+ > config set Resolver/listen_on[2]/port 53
+ > config commit
+
+ (Replace the "2" as needed; run "config show Resolver/listen_on" if
+ needed.)
+
+Access Control
+
+ By default, the b10-resolver daemon only accepts DNS queries from the
+ localhost (127.0.0.1 and ::1). The Resolver/query_acl configuration may be
+ used to reject, drop, or allow specific IPs or networks. This
+ configuration list is first-match: the first matching entry applies.
+
+ The configuration's action item may be set to "ACCEPT" to allow the
+ incoming query, "REJECT" to respond with a DNS REFUSED return code, or
+ "DROP" to ignore the query without any response (such as a blackhole). For
+ more information, see the respective debugging messages:
+ RESOLVER_QUERY_ACCEPTED, RESOLVER_QUERY_REJECTED, and
+ RESOLVER_QUERY_DROPPED.
+
+ The required configuration's from item is set to an IPv4 or IPv6 address,
+ addresses with a network mask, or to the special lowercase keywords
+ "any6" (for any IPv6 address) or "any4" (for any IPv4 address).
+
+ For example, to allow the 192.168.1.0/24 network to use your recursive name
+ server, at the bindctl prompt run:
+
+ > config add Resolver/query_acl
+ > config set Resolver/query_acl[2]/action "ACCEPT"
+ > config set Resolver/query_acl[2]/from "192.168.1.0/24"
+ > config commit
+
+ (Replace the "2" as needed; run "config show Resolver/query_acl" if
+ needed.)
+
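+ Similarly, to silently drop queries from a particular network (the
+ 192.0.2.0/24 prefix and the index "3" below are only examples; adjust
+ them as above), a "DROP" entry could be added in the same way:
+
+ > config add Resolver/query_acl
+ > config set Resolver/query_acl[3]/action "DROP"
+ > config set Resolver/query_acl[3]/from "192.0.2.0/24"
+ > config commit
+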
+ Note
+
+ This prototype access control configuration syntax may be changed.
+
+Forwarding
+
+ To enable forwarding, configure the upstream address and port that
+ queries should be forwarded to; for example:
+
+ > config set Resolver/forward_addresses [{ "address": "192.168.1.1", "port": 53 }]
+ > config commit
+
+ (Replace 192.168.1.1 with the address of your full resolver.)
+
+ Normal iterative name service can be re-enabled by clearing the forwarding
+ address(es); for example:
+
+ > config set Resolver/forward_addresses []
+ > config commit
+
+Chapter 13. Statistics
+
+ The b10-stats process is started by bind10. It periodically collects
+ statistics data from various modules and aggregates it.
+
+ This stats daemon provides commands to identify if it is running, show
+ specified or all statistics data, show specified or all statistics data
+ schema, and set specified statistics data. For example, using bindctl:
+
+ > Stats show
+ {
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
+ }
+
+
+Chapter 14. Logging
+
+ Table of Contents
+
+ Logging configuration
+
+ Loggers
+
+ Output Options
+
+ Example session
+
+ Logging Message Format
+
+Logging configuration
+
+ The logging system in BIND 10 is configured through the Logging module.
+ All BIND 10 modules will look at the configuration in Logging to see what
+ should be logged and to where.
+
+ Loggers
+
+ Within BIND 10, a message is logged through a component called a "logger".
+ Different parts of BIND 10 log messages through different loggers, and
+ each logger can be configured independently of one another.
+
+ In the Logging module, you can specify the configuration for zero or more
+ loggers; any that are not specified will take appropriate default values.
+
+ The three most important elements of a logger configuration are the name
+ (the component that is generating the messages), the severity (what to
+ log), and the output_options (where to log).
+
+ name (string)
+
+ Each logger in the system has a name, the name being that of the component
+ using it to log messages. For instance, if you want to configure logging
+ for the resolver module, you add an entry for a logger named "Resolver".
+ This configuration will then be used by the loggers in the Resolver
+ module, and all the libraries used by it.
+
+ If you want to specify logging for one specific library within the module,
+ you set the name to module.library. For example, the logger used by the
+ nameserver address store component has the full name of "Resolver.nsas".
+ If there is no entry in Logging for a particular library, it will use the
+ configuration given for the module.
+
+ To illustrate this, suppose you want the cache library to log messages of
+ severity DEBUG, and the rest of the resolver code to log messages of
+ severity INFO. To achieve this you specify two loggers, one with the name
+ "Resolver" and severity INFO, and one with the name "Resolver.cache" with
+ severity DEBUG. As there are no entries for other libraries (e.g. the
+ nsas), they will use the configuration for the module ("Resolver"), so
+ giving the desired behavior.
+
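+ A configuration sketch for this scenario, using the same bindctl commands
+ as the example session later in this chapter (and assuming no loggers
+ have been configured yet, so the new entries get indexes 0 and 1), would
+ be:
+
+ > config add Logging/loggers
+ > config set Logging/loggers[0]/name Resolver
+ > config set Logging/loggers[0]/severity INFO
+ > config add Logging/loggers
+ > config set Logging/loggers[1]/name Resolver.cache
+ > config set Logging/loggers[1]/severity DEBUG
+ > config commit
+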
+ One special case is that of a module name of "*" (asterisk), which is
+ interpreted as any module. You can set global logging options by using
+ this, including setting the logging configuration for a library that is
+ used by multiple modules (e.g. "*.config" specifies the configuration
+ library code in whatever module is using it).
+
+ If there are multiple logger specifications in the configuration that
+ might match a particular logger, the specification with the more specific
+ logger name takes precedence. For example, if there are entries for
+ both "*" and "Resolver", the resolver module -- and all libraries it uses
+ -- will log messages according to the configuration in the second entry
+ ("Resolver"). All other modules will use the configuration of the first
+ entry ("*"). If there was also a configuration entry for "Resolver.cache",
+ the cache library within the resolver would use that in preference to the
+ entry for "Resolver".
+
+ One final note about the naming. When specifying the module name within a
+ logger, use the name of the module as specified in bindctl, e.g.
+ "Resolver" for the resolver module, "Xfrout" for the xfrout module, etc.
+ When the message is logged, the message will include the name of the
+ logger generating the message, but with the module name replaced by the
+ name of the process implementing the module (so for example, a message
+ generated by the "Auth.cache" logger will appear in the output with a
+ logger name of "b10-auth.cache").
+
+ severity (string)
+
+ This specifies the category of messages logged. Each message is logged
+ with an associated severity which may be one of the following (in
+ descending order of severity):
+
+ o FATAL
+ o ERROR
+ o WARN
+ o INFO
+ o DEBUG
+
+ When the severity of a logger is set to one of these values, it will only
+ log messages of that severity, and the severities above it. The severity
+ may also be set to NONE, in which case all messages from that logger are
+ inhibited.
+
+ output_options (list)
+
+ Each logger can have zero or more output_options. These specify where log
+ messages are sent. They are explained in detail below.
+
+ The other options for a logger are:
+
+ debuglevel (integer)
+
+ When a logger's severity is set to DEBUG, this value specifies what debug
+ messages should be printed. It ranges from 0 (least verbose) to 99 (most
+ verbose).
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ additive (true or false)
+
+ If this is true, the output_options from the parent will be used. For
+ example, if there are two loggers configured: "Resolver" and
+ "Resolver.cache", and additive is true in the second, it will write the
+ log messages not only to the destinations specified for "Resolver.cache",
+ but also to the destinations as specified in the output_options in the
+ logger named "Resolver".
+
+ Output Options
+
+ The main settings for an output option are the destination and a value
+ called output, the meaning of which depends on the destination that is
+ set.
+
+ destination (string)
+
+ The destination is the type of output. It can be one of:
+
+ o console
+ o file
+ o syslog
+
+ output (string)
+
+ Depending on what is set as the output destination, this value is
+ interpreted as follows:
+
+ destination is "console"
+ The value of output must be one of "stdout" (messages printed to
+ standard output) or "stderr" (messages printed to standard error).
+
+ destination is "file"
+ The value of output is interpreted as a file name; log messages
+ will be appended to this file.
+
+ destination is "syslog"
+ The value of output is interpreted as the syslog facility (e.g.
+ local0) that should be used for log messages.
+
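+ For instance, to send a logger's messages to the local0 syslog facility
+ instead of a file, the corresponding output option entries (following the
+ same configuration paths used in the example session below) would be:
+
+ > config set Logging/loggers[0]/output_options[0]/destination syslog
+ > config set Logging/loggers[0]/output_options[0]/output local0
+ > config commit
+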
+ The other options for output_options are:
+
+ flush (true or false)
+
+ Flush buffers after each log message. Doing this will reduce performance
+ but will ensure that if the program terminates abnormally, all messages up
+ to the point of termination are output.
+
+ maxsize (integer)
+
+ Only relevant when destination is file, this is the maximum file size of
+ output files in bytes. When the maximum size is reached, the file is
+ renamed and a new file opened. (For example, a ".1" is appended to the
+ name -- if a ".1" file exists, it is renamed ".2", etc.)
+
+ If this is 0, no maximum file size is used.
+
+ maxver (integer)
+
+ Maximum number of old log files to keep around when rolling the output
+ file. Only relevant when destination is "file".
+
+ Example session
+
+ In this example we want to set the global logging to write to the file
+ /var/log/bind10.log, at severity WARN. We want the authoritative server
+ to log at DEBUG with debuglevel 40, to a different file
+ (/tmp/auth_debug.log).
+
+ Start bindctl.
+
+ ["login success "]
+ > config show Logging
+ Logging/loggers [] list
+
+ By default, no specific loggers are configured, in which case the severity
+ defaults to INFO and the output is written to stderr.
+
+ Let's first add a default logger:
+
+ > config add Logging/loggers
+ > config show Logging
+ Logging/loggers/ list (modified)
+
+ The loggers value line changed to indicate that it is no longer an empty
+ list:
+
+ > config show Logging/loggers
+ Logging/loggers[0]/name "" string (default)
+ Logging/loggers[0]/severity "INFO" string (default)
+ Logging/loggers[0]/debuglevel 0 integer (default)
+ Logging/loggers[0]/additive false boolean (default)
+ Logging/loggers[0]/output_options [] list (default)
+
+ The name is mandatory, so we must set it. We will also change the
+ severity. Let's start with the global logger.
+
+ > config set Logging/loggers[0]/name *
+ > config set Logging/loggers[0]/severity WARN
+ > config show Logging/loggers
+ Logging/loggers[0]/name "*" string (modified)
+ Logging/loggers[0]/severity "WARN" string (modified)
+ Logging/loggers[0]/debuglevel 0 integer (default)
+ Logging/loggers[0]/additive false boolean (default)
+ Logging/loggers[0]/output_options [] list (default)
+
+ Of course, we need to specify where we want the log messages to go, so we
+ add an entry for an output option.
+
+ > config add Logging/loggers[0]/output_options
+ > config show Logging/loggers[0]/output_options
+ Logging/loggers[0]/output_options[0]/destination "console" string (default)
+ Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+ Logging/loggers[0]/output_options[0]/flush false boolean (default)
+ Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+ Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+
+ These aren't the values we are looking for.
+
+ > config set Logging/loggers[0]/output_options[0]/destination file
+ > config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log
+ > config set Logging/loggers[0]/output_options[0]/maxsize 30000
+ > config set Logging/loggers[0]/output_options[0]/maxver 8
+
+ Which would make the entire configuration for this logger look like:
+
+ > config show all Logging/loggers
+ Logging/loggers[0]/name "*" string (modified)
+ Logging/loggers[0]/severity "WARN" string (modified)
+ Logging/loggers[0]/debuglevel 0 integer (default)
+ Logging/loggers[0]/additive false boolean (default)
+ Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+ Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+ Logging/loggers[0]/output_options[0]/flush false boolean (default)
+ Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+ Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+
+ That looks OK, so let's commit it before we add the configuration for the
+ authoritative server's logger.
+
+ > config commit
+
+ Now that we have set it, and checked each value along the way, adding a
+ second entry is quite similar.
+
+ > config add Logging/loggers
+ > config set Logging/loggers[1]/name Auth
+ > config set Logging/loggers[1]/severity DEBUG
+ > config set Logging/loggers[1]/debuglevel 40
+ > config add Logging/loggers[1]/output_options
+ > config set Logging/loggers[1]/output_options[0]/destination file
+ > config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log
+ > config commit
+
+ And that's it. Once we have found whatever it was we needed the debug
+ messages for, we can simply remove the second logger to let the
+ authoritative server use the same settings as the rest.
+
+ > config remove Logging/loggers[1]
+ > config commit
+
+ And every module will now be using the values from the logger named "*".
+
+Logging Message Format
+
+ Each message written by BIND 10 to the configured logging destinations
+ comprises a number of components that identify the origin of the message
+ and, if the message indicates a problem, information about the problem
+ that may be useful in fixing it.
+
+ Consider the message below logged to a file:
+
+ 2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)
+
+ Note: the layout of messages written to the system logging file (syslog)
+ may be slightly different. This message has been split across two lines
+ here for display reasons; in the logging file, it will appear on one
+ line.
+
+ The log message comprises a number of components:
+
+ 2011-06-15 13:48:22.034
+
+ The date and time at which the message was generated.
+
+ ERROR
+
+ The severity of the message.
+
+ [b10-resolver.asiolink]
+
+ The source of the message. This comprises two components: the BIND
+ 10 process generating the message (in this case, b10-resolver) and
+ the module within the program from which the message originated
+ (which in the example is the asynchronous I/O link module,
+ asiolink).
+
+ ASIODNS_OPENSOCK
+
+ The message identification. Every message in BIND 10 has a unique
+ identification, which can be used as an index into the BIND 10
+ Messages Manual (http://bind10.isc.org/docs/bind10-messages.html)
+ from which more information can be obtained.
+
+ error 111 opening TCP socket to 127.0.0.1(53)
+
+ A brief description of the cause of the problem. Within this text,
+ information relating to the condition that caused the message to
+ be logged will be included. In this example, error number 111 (an
+ operating system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the local system
+ (address 127.0.0.1). The next step would be to find out the reason
+ for the failure by consulting your system's documentation to
+ identify what error number 111 means.
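+
+ For example, on a system with Python available, the standard library can
+ translate such a number. (This is only an illustration; error numbers are
+ operating system specific, and 111 happens to be ECONNREFUSED, "Connection
+ refused", on Linux.)
+
+     >>> import os
+     >>> os.strerror(111)   # ask the operating system for the text of error 111
+     'Connection refused'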
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index d34746b..21bb671 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -41,9 +41,10 @@
</para>
<para>
This is the reference guide for BIND 10 version &__VERSION__;.
- The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <ulink
- url="http://bind10.isc.org/docs"/>. </para> </abstract>
+ The most up-to-date version of this document (in PDF, HTML,
+ and plain text formats), along with other documents for
+ BIND 10, can be found at <ulink url="http://bind10.isc.org/docs"/>.
+ </para> </abstract>
<releaseinfo>This is the reference guide for BIND 10 version
&__VERSION__;.</releaseinfo>
@@ -1257,21 +1258,80 @@ TODO
<para>
Incoming zones are transferred using the <command>b10-xfrin</command>
process which is started by <command>bind10</command>.
- When received, the zone is stored in the BIND 10
- data store, and its records can be served by
+ When received, the zone is stored in the corresponding BIND 10
+ data source, and its records can be served by
<command>b10-auth</command>.
In combination with <command>b10-zonemgr</command> (for
automated SOA checks), this allows the BIND 10 server to
provide <quote>secondary</quote> service.
</para>
+ <para>
+ The <command>b10-xfrin</command> process supports both AXFR and
+ IXFR. Due to some implementation limitations of the current
+ development release, however, it only tries AXFR by default,
+ and IXFR must be explicitly enabled (see below).
+ </para>
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
+
<note><simpara>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ In the current development release of BIND 10, incoming zone
+ transfers are only available for SQLite3-based data sources,
+ that is, they don't work for an in-memory data source.
+ </simpara></note>
-<!-- TODO: sqlite3 data source only? -->
+ <section>
+ <title>Configuration for Incoming Zone Transfers</title>
+ <para>
+ In practice, you need to specify a list of secondary zones to
+ enable incoming zone transfers for these zones. (You can still
+ trigger a zone transfer manually, without a prior configuration;
+ see below.)
+ </para>
- </simpara></note>
+ <para>
+ For example, to enable zone transfers for a zone named "example.com"
+ (whose master address is assumed to be 2001:db8::53 here),
+ run the following at the <command>bindctl</command> prompt:
+
+ <screen>> <userinput>config add Xfrin/zones</userinput>
+> <userinput>config set Xfrin/zones[0]/name "<option>example.com</option>"</userinput>
+> <userinput>config set Xfrin/zones[0]/master_addr "<option>2001:db8::53</option>"</userinput>
+> <userinput>config commit</userinput></screen>
+
+ (We assume there has been no zone configuration before).
+ </para>
+ </section>
+
+ <section>
+ <title>Enabling IXFR</title>
+ <para>
+ As noted above, <command>b10-xfrin</command> uses AXFR for
+ zone transfers by default. To enable IXFR for zone transfers
+ for a particular zone, set the <userinput>use_ixfr</userinput>
+ configuration parameter to <userinput>true</userinput>.
+ In the configuration example above, you'll need
+ to add the following before performing <userinput>commit</userinput>:
+ <screen>> <userinput>config set Xfrin/zones[0]/use_ixfr true</userinput></screen>
+ </para>
+
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
+ <note><simpara>
+ One reason why IXFR is disabled by default in the current
+ release is that it does not support automatic fallback from IXFR to
+ AXFR when it encounters a primary server that doesn't support
+ outbound IXFR (and not many existing implementations support
+ it). Another, related reason is that it does not use AXFR even
+ if it has no knowledge about the zone (such as the very first
+ time the secondary server is set up). IXFR requires the
+ "current version" of the zone, so obviously it doesn't work
+ in this situation and AXFR is the only workable choice.
+ The current release of <command>b10-xfrin</command> does not
+ make this selection automatically.
+ These features will be implemented in a future
+ version, at which point IXFR will be enabled by default.
+ </simpara></note>
+ </section>
<!-- TODO:
@@ -1284,13 +1344,18 @@ what if a NOTIFY is sent?
-->
- <para>
- To manually trigger a zone transfer to retrieve a remote zone,
- you may use the <command>bindctl</command> utility.
- For example, at the <command>bindctl</command> prompt run:
+ <section>
+ <title>Trigger an Incoming Zone Transfer Manually</title>
+
+ <para>
+ To manually trigger a zone transfer to retrieve a remote zone,
+ you may use the <command>bindctl</command> utility.
+ For example, at the <command>bindctl</command> prompt run:
+
+ <screen>> <userinput>Xfrin retransfer zone_name="<option>foo.example.org</option>" master=<option>192.0.2.99</option></userinput></screen>
+ </para>
+ </section>
- <screen>> <userinput>Xfrin retransfer zone_name="<option>foo.example.org</option>" master=<option>192.0.2.99</option></userinput></screen>
- </para>
<!-- TODO: can that retransfer be used to identify a new zone? -->
<!-- TODO: what if doesn't exist at that master IP? -->
@@ -1522,24 +1587,30 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<para>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show the
+ schema of specified or all statistics data, and set specified
+ statistics data.
For example, using <command>bindctl</command>:
<screen>
> <userinput>Stats show</userinput>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</screen>
</para>
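
As a minimal illustration of the regrouped output above (a sketch assuming the
JSON printed by "Stats show" has been captured in a Python string named output;
that name is only for this example):

    import json

    stats = json.loads(output)            # output: the JSON text shown above
    print(stats["Auth"]["queries.udp"])   # counters are now grouped per module
    print(stats["Boss"]["boot_time"])     # instead of flat "bind10.boot_time" keys
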
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index f5c44b3..bade381 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -244,6 +244,14 @@ packet.
</para></listitem>
</varlistentry>
+<varlistentry id="AUTH_INVALID_STATISTICS_DATA">
+<term>AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</term>
+<listitem><para>
+An error was encountered when the authoritative server specified
+statistics data that is invalid according to the auth specification file.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="AUTH_LOAD_TSIG">
<term>AUTH_LOAD_TSIG loading TSIG keys</term>
<listitem><para>
@@ -581,6 +589,14 @@ started according to the configuration.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_INVALID_STATISTICS_DATA">
+<term>BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</term>
+<listitem><para>
+An error was encountered when the boss module specified
+statistics data that is invalid according to the boss specification file.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_INVALID_USER">
<term>BIND10_INVALID_USER invalid user: %1</term>
<listitem><para>
@@ -1150,7 +1166,7 @@ Debug message. The resolver is trying to look up data in the RRset cache.
</varlistentry>
<varlistentry id="CACHE_RRSET_NOT_FOUND">
-<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</term>
+<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</term>
<listitem><para>
Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
in the cache.
@@ -1773,13 +1789,12 @@ means no limit.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_DATABASE_FIND_ERROR">
-<term>DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</term>
+<varlistentry id="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED">
+<term>DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</term>
<listitem><para>
-This was an internal error while reading data from a datasource. This can either
-mean the specific data source implementation is not behaving correctly, or the
-data it provides is invalid. The current search is aborted.
-The error message contains specific information about the error.
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
</para></listitem>
</varlistentry>
@@ -1795,28 +1810,9 @@ name and type in the database.
<term>DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</term>
<listitem><para>
The datasource backend provided resource records for the given RRset with
-different TTL values. The TTL of the RRSET is set to the lowest value, which
-is printed in the log message.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR">
-<term>DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</term>
-<listitem><para>
-There was an uncaught general exception while reading data from a datasource.
-This most likely points to a logic error in the code, and can be considered a
-bug. The current search is aborted. Specific information about the exception is
-printed in this error message.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR">
-<term>DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</term>
-<listitem><para>
-There was an uncaught ISC exception while reading data from a datasource. This
-most likely points to a logic error in the code, and can be considered a bug.
-The current search is aborted. Specific information about the exception is
-printed in this error message.
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
</para></listitem>
</varlistentry>
@@ -1846,6 +1842,15 @@ instead.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL">
+<term>DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</term>
+<listitem><para>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_DATABASE_FOUND_NXDOMAIN">
<term>DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</term>
<listitem><para>
@@ -1871,6 +1876,132 @@ returned is printed.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_ITERATE">
+<term>DATASRC_DATABASE_ITERATE iterating zone %1</term>
+<listitem><para>
+The program is reading the whole zone, i.e. not searching for specific data but
+going through each of the RRsets there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_END">
+<term>DATASRC_DATABASE_ITERATE_END iterating zone finished</term>
+<listitem><para>
+While iterating through the zone, the program reached the end of the data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_NEXT">
+<term>DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</term>
+<listitem><para>
+While iterating through the zone, the program extracted the next RRset from it.
+The name and RRtype of the RRset are indicated in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_TTL_MISMATCH">
+<term>DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</term>
+<listitem><para>
+While iterating through the zone, the TTL values for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
+<term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
+<listitem><para>
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_CREATED">
+<term>DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</term>
+<listitem><para>
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_DESTROYED">
+<term>DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</term>
+<listitem><para>
+Debug information. A zone updater object is destroyed, either after
+successfully making updates to the shown zone on the shown backend database
+or after a failure to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_ROLLBACK">
+<term>DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</term>
+<listitem><para>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also be a bug in the application that forgot to commit
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL">
+<term>DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</term>
+<listitem><para>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to roll back incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or a
+software bug in the underlying data source implementation. In either
+case, if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happened and
+whether the data is still valid.
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD">
+<term>DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</term>
+<listitem><para>
+The database doesn't contain a directly matching domain, but it does contain a
+wildcard that is being used to synthesize the answer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_CANCEL_NS">
+<term>DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</term>
+<listitem><para>
+The database was queried to provide glue data and it didn't find a direct match.
+It could create one from the given wildcard, but wildcard matching is forbidden
+under a zone cut, and a zone cut was found. Therefore the delegation is returned
+instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_CANCEL_SUB">
+<term>DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</term>
+<listitem><para>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like an empty non-terminal (actually,
+from the protocol point of view, it is an empty non-terminal, but the code
+discovers it differently).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_EMPTY">
+<term>DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</term>
+<listitem><para>
+The given wildcard exists implicitly in the domain space, as an empty non-terminal
+(e.g. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty, as is the wildcard.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_DO_QUERY">
<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
<listitem><para>
@@ -2750,6 +2881,15 @@ generated.
</para></listitem>
</varlistentry>
+<varlistentry id="LIBXFRIN_DIFFERENT_TTL">
+<term>LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</term>
+<listitem><para>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset, but the TTLs of these don't match each other. As they are combined,
+the later TTL in the sequence is overwritten by the earlier one.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
@@ -4103,21 +4243,17 @@ configuration update from the configuration manager.
</para></listitem>
</varlistentry>
-<varlistentry id="STATS_RECEIVED_REMOVE_COMMAND">
-<term>STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</term>
+<varlistentry id="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</term>
<listitem><para>
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
+The stats module received a command to show all statistics schemas of all modules.
</para></listitem>
</varlistentry>
-<varlistentry id="STATS_RECEIVED_RESET_COMMAND">
-<term>STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</term>
+<varlistentry id="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</term>
<listitem><para>
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
+The stats module received a command to show the specified statistics schema of the specified module.
</para></listitem>
</varlistentry>
@@ -4168,6 +4304,21 @@ to send its data to the stats module.
</para></listitem>
</varlistentry>
+<varlistentry id="STATS_STARTING">
+<term>STATS_STARTING starting</term>
+<listitem><para>
+The stats module is now starting.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_START_ERROR">
+<term>STATS_START_ERROR stats module error: %1</term>
+<listitem><para>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="STATS_STOPPED_BY_KEYBOARD">
<term>STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
<listitem><para>
@@ -4191,39 +4342,28 @@ Please check your installation.
<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
<listitem><para>
The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
</para></listitem>
</varlistentry>
-<varlistentry id="XFRIN_AXFR_INTERNAL_FAILURE">
-<term>XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<varlistentry id="XFRIN_AXFR_INCONSISTENT_SOA">
+<term>XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</term>
<listitem><para>
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
-The error is shown in the log message.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFRIN_AXFR_TRANSFER_FAILURE">
-<term>XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</term>
-<listitem><para>
-The AXFR transfer for the given zone has failed due to a protocol error.
-The error is shown in the log message.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFRIN_AXFR_TRANSFER_STARTED">
-<term>XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</term>
-<listitem><para>
-A connection to the master server has been made, the serial value in
-the SOA record has been checked, and a zone transfer has been started.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFRIN_AXFR_TRANSFER_SUCCESS">
-<term>XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</term>
-<listitem><para>
-The AXFR transfer of the given zone was successfully completed.
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same. According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold. There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems to be that it would be better to reject the
+transfer if a mismatch is detected. On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs. For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found. If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of the
+zone), we can consider a stricter action.
</para></listitem>
</varlistentry>
@@ -4280,6 +4420,27 @@ shown in the log message.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_GOT_INCREMENTAL_RESP">
+<term>XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</term>
+<listitem><para>
+In an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_GOT_NONINCREMENTAL_RESP">
+<term>XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</term>
+<listitem><para>
+A non-incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA. A non-incremental transfer is
+either AXFR or AXFR-style IXFR. In the latter case, it means that
+in a response to an IXFR query the first data is not an SOA or its serial
+is not equal to the requested SOA serial.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_IMPORT_DNS">
<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
<listitem><para>
@@ -4305,6 +4466,16 @@ likely means that the msgq daemon has quit or was killed.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_NOTIFY_UNKNOWN_MASTER">
+<term>XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</term>
+<listitem><para>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
<listitem><para>
@@ -4338,6 +4509,38 @@ exception message is printed in the log message.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_XFR_OTHER_FAILURE">
+<term>XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</term>
+<listitem><para>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or a failure
+in the database connection. The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
+<term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
+<listitem><para>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
+<term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_SUCCESS">
+<term>XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</term>
+<listitem><para>
+The XFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
<listitem><para>
@@ -4401,6 +4604,14 @@ configuration manager b10-cfgmgr is not running.
</para></listitem>
</varlistentry>
+<varlistentry id="XFROUT_CONFIG_ERROR">
+<term>XFROUT_CONFIG_ERROR error found in configuration data: %1</term>
+<listitem><para>
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
<listitem><para>
@@ -4430,6 +4641,17 @@ system and your specific installation.
</para></listitem>
</varlistentry>
+<varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
+<term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
+<listitem><para>
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFROUT_NEW_CONFIG">
<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
<listitem><para>
diff --git a/ext/asio/asio/impl/error_code.ipp b/ext/asio/asio/impl/error_code.ipp
index ed37a17..218c09b 100644
--- a/ext/asio/asio/impl/error_code.ipp
+++ b/ext/asio/asio/impl/error_code.ipp
@@ -11,6 +11,9 @@
#ifndef ASIO_IMPL_ERROR_CODE_IPP
#define ASIO_IMPL_ERROR_CODE_IPP
+// strerror() needs <cstring>
+#include <cstring>
+
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index e3128b5..4d8ec83 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -50,6 +50,12 @@ b10_auth_SOURCES += command.cc command.h
b10_auth_SOURCES += common.h common.cc
b10_auth_SOURCES += statistics.cc statistics.h
b10_auth_SOURCES += main.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+b10_auth_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
EXTRA_DIST += auth_messages.mes
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index 9f04b76..1ffa687 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -257,4 +257,7 @@ request. The zone manager component has been informed of the request,
but has returned an error response (which is included in the message). The
NOTIFY request will not be honored.
+% AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the authoritative server specified
+statistics data that is invalid according to the auth specification file.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 5a31442..c9dac88 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -125,6 +125,10 @@ public:
/// The TSIG keyring
const shared_ptr<TSIGKeyRing>* keyring_;
+
+ /// Bind the ModuleSpec object in config_session_ with
+ /// isc:config::ModuleSpec::validateStatistics.
+ void registerStatisticsValidator();
private:
std::string db_file_;
@@ -139,6 +143,9 @@ private:
/// Increment query counter
void incCounter(const int protocol);
+
+ // validateStatistics
+ bool validateStatistics(isc::data::ConstElementPtr data) const;
};
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
@@ -317,6 +324,7 @@ AuthSrv::setXfrinSession(AbstractSession* xfrin_session) {
void
AuthSrv::setConfigSession(ModuleCCSession* config_session) {
impl_->config_session_ = config_session;
+ impl_->registerStatisticsValidator();
}
void
@@ -670,6 +678,22 @@ AuthSrvImpl::incCounter(const int protocol) {
}
}
+void
+AuthSrvImpl::registerStatisticsValidator() {
+ counters_.registerStatisticsValidator(
+ boost::bind(&AuthSrvImpl::validateStatistics, this, _1));
+}
+
+bool
+AuthSrvImpl::validateStatistics(isc::data::ConstElementPtr data) const {
+ if (config_session_ == NULL) {
+ return (false);
+ }
+ return (
+ config_session_->getModuleSpec().validateStatistics(
+ data, true));
+}
+
ConstElementPtr
AuthSrvImpl::setDbFile(ConstElementPtr config) {
ConstElementPtr answer = isc::config::createAnswer();
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index d51495b..53c019f 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -13,6 +13,12 @@ query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
query_bench_SOURCES += ../auth_config.h ../auth_config.cc
query_bench_SOURCES += ../statistics.h ../statistics.cc
query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+query_bench_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 76e5007..e62719f 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -37,11 +37,14 @@ public:
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
+ void registerStatisticsValidator
+ (AuthCounters::validator_type validator);
// Currently for testing purpose only
uint64_t getCounter(const AuthCounters::CounterType type) const;
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
+ AuthCounters::validator_type validator_;
};
AuthCountersImpl::AuthCountersImpl() :
@@ -67,16 +70,25 @@ AuthCountersImpl::submitStatistics() const {
}
std::stringstream statistics_string;
statistics_string << "{\"command\": [\"set\","
- << "{ \"stats_data\": "
- << "{ \"auth.queries.udp\": "
+ << "{ \"owner\": \"Auth\","
+ << " \"data\":"
+ << "{ \"queries.udp\": "
<< counters_.at(AuthCounters::COUNTER_UDP_QUERY)
- << ", \"auth.queries.tcp\": "
+ << ", \"queries.tcp\": "
<< counters_.at(AuthCounters::COUNTER_TCP_QUERY)
<< " }"
<< "}"
<< "]}";
isc::data::ConstElementPtr statistics_element =
isc::data::Element::fromJSON(statistics_string);
+ // validate the statistics data before sending it
+ if (validator_) {
+ if (!validator_(
+ statistics_element->get("command")->get(1)->get("data"))) {
+ LOG_ERROR(auth_logger, AUTH_INVALID_STATISTICS_DATA);
+ return (false);
+ }
+ }
try {
// group_{send,recv}msg() can throw an exception when encountering
// an error, and group_recvmsg() will throw an exception on timeout.
@@ -105,6 +117,13 @@ AuthCountersImpl::setStatisticsSession
statistics_session_ = statistics_session;
}
+void
+AuthCountersImpl::registerStatisticsValidator
+ (AuthCounters::validator_type validator)
+{
+ validator_ = validator;
+}
+
// Currently for testing purpose only
uint64_t
AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
@@ -139,3 +158,10 @@ uint64_t
AuthCounters::getCounter(const AuthCounters::CounterType type) const {
return (impl_->getCounter(type));
}
+
+void
+AuthCounters::registerStatisticsValidator
+ (AuthCounters::validator_type validator) const
+{
+ return (impl_->registerStatisticsValidator(validator));
+}
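
For reference, the statistics message assembled by submitStatistics() above now
takes the following shape (a sketch in Python literal form; the counter values
are examples only):

    # Shape of the "set" command sent by b10-auth to the Stats module.
    statistics_command = {
        "command": ["set", {
            "owner": "Auth",           # module that owns the counters
            "data": {
                "queries.udp": 2,      # formerly "auth.queries.udp"
                "queries.tcp": 1,      # formerly "auth.queries.tcp"
            },
        }],
    }
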
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 5bf6436..c930414 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -131,6 +131,26 @@ public:
/// \return the value of the counter specified by \a type.
///
uint64_t getCounter(const AuthCounters::CounterType type) const;
+
+ /// \brief A type of validation function for the specification in
+ /// isc::config::ModuleSpec.
+ ///
+ /// This type might be useful not only for the statistics
+ /// specification but also for the config_data specification and for
+ /// commands.
+ ///
+ typedef boost::function<bool(const isc::data::ConstElementPtr&)>
+ validator_type;
+
+ /// \brief Register a statistics validation function for
+ /// AuthCounters.
+ ///
+ /// This method never throws an exception.
+ ///
+ /// \param validator A function used to validate statistics data
+ /// against the statistics specification.
+ ///
+ void registerStatisticsValidator(AuthCounters::validator_type validator) const;
};
#endif // __STATISTICS_H
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index 5cd2f5a..d27386e 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -37,6 +37,13 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += change_user_unittest.cc
run_unittests_SOURCES += statistics_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+run_unittests_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 9a3dded..98e573b 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -16,6 +16,8 @@
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+
#include <cc/data.h>
#include <cc/session.h>
@@ -76,6 +78,13 @@ protected:
}
MockSession statistics_session_;
AuthCounters counters;
+ // No need to inherit from the original class here.
+ class MockModuleSpec {
+ public:
+ bool validateStatistics(ConstElementPtr, const bool valid) const
+ { return (valid); }
+ };
+ MockModuleSpec module_spec_;
};
void
@@ -181,7 +190,7 @@ TEST_F(AuthCountersTest, submitStatisticsWithException) {
statistics_session_.setThrowSessionTimeout(false);
}
-TEST_F(AuthCountersTest, submitStatistics) {
+TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
// Submit statistics data.
// Validate if it submits correct data.
@@ -201,12 +210,69 @@ TEST_F(AuthCountersTest, submitStatistics) {
// Command is "set".
EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
ConstElementPtr statistics_data = statistics_session_.sent_msg
->get("command")->get(1)
- ->get("stats_data");
+ ->get("data");
// UDP query counter is 2 and TCP query counter is 1.
- EXPECT_EQ(2, statistics_data->get("auth.queries.udp")->intValue());
- EXPECT_EQ(1, statistics_data->get("auth.queries.tcp")->intValue());
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
}
+TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
+
+ // a validator for the unittest
+ AuthCounters::validator_type validator;
+ ConstElementPtr el;
+
+ // Submit statistics data with correct statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, true);
+
+ EXPECT_TRUE(validator(el));
+
+ // register validator to AuthCounters
+ counters.registerStatisticsValidator(validator);
+
+ // Counters should be initialized to 0.
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+
+ // UDP query counter is set to 2.
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ // TCP query counter is set to 1.
+ counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+
+ // checks the value returned by submitStatistics
+ EXPECT_TRUE(counters.submitStatistics());
+
+ // Destination is "Stats".
+ EXPECT_EQ("Stats", statistics_session_.msg_destination);
+ // Command is "set".
+ EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
+ ->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
+ ConstElementPtr statistics_data = statistics_session_.sent_msg
+ ->get("command")->get(1)
+ ->get("data");
+ // UDP query counter is 2 and TCP query counter is 1.
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
+
+ // Submit statistics data with incorrect statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, false);
+
+ EXPECT_FALSE(validator(el));
+
+ counters.registerStatisticsValidator(validator);
+
+ // checks the value returned by submitStatistics
+ EXPECT_FALSE(counters.submitStatistics());
+}
}
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 5ec0c9f..69ea256 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -33,6 +33,7 @@ $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
+ -e "s|@@LIBDIR@@|$(libdir)|" \
-e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
chmod a+x $@
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 4bac069..4debcdb 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -198,3 +198,7 @@ the message channel.
% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
An unknown child process has exited. The PID is printed, but no further
action will be taken by the boss process.
+
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the boss module specified
+statistics data that is invalid according to the boss specification file.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 28af8cc..94747f2 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -85,7 +85,7 @@ isc.util.process.rename(sys.argv[0])
# number, and the overall BIND 10 version number (set in configure.ac).
VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-# This is for bind10.boottime of stats module
+# This is for boot_time of Boss
_BASETIME = time.gmtime()
class RestartSchedule:
@@ -308,9 +308,11 @@ class BoB:
return process_list
def _get_stats_data(self):
- return { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }}
+ return { "owner": "Boss",
+ "data": { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+ }
def command_handler(self, command, args):
logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
@@ -325,15 +327,22 @@ class BoB:
answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
elif command == "sendstats":
# send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', self._get_stats_data())
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- # Consume the answer, in case it becomes a orphan message.
- try:
- self.cc_session.group_recvmsg(False, seq)
- except isc.cc.session.SessionTimeout:
- pass
- answer = isc.config.ccsession.create_answer(0)
+ stats_data = self._get_stats_data()
+ valid = self.ccs.get_module_spec().validate_statistics(
+ True, stats_data["data"])
+ if valid:
+ cmd = isc.config.ccsession.create_command('set', stats_data)
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ # Consume the answer, in case it becomes an orphan message.
+ try:
+ self.cc_session.group_recvmsg(False, seq)
+ except isc.cc.session.SessionTimeout:
+ pass
+ answer = isc.config.ccsession.create_answer(0)
+ else:
+ logger.fatal(BIND10_INVALID_STATISTICS_DATA)
+ answer = isc.config.ccsession.create_answer(
+ 1, "specified statistics data is invalid")
elif command == "ping":
answer = isc.config.ccsession.create_answer(0, "pong")
elif command == "show_processes":
@@ -570,6 +579,22 @@ class BoB:
self.start_simple("b10-xfrout", c_channel_env)
def start_xfrin(self, c_channel_env):
+ # XXX: a quick-hack workaround. xfrin will implicitly use dynamically
+ # loadable data source modules, which will be installed in $(libdir).
+ # On some OSes (including MacOS X and *BSDs) the main process (python)
+ # cannot find the modules unless they are located in a common shared
+ # object path or a path in the (DY)LD_LIBRARY_PATH. We should seek
+ # a cleaner solution, but for a short term workaround we specify the
+ # path here, unconditionally, and without even bothering which
+ # environment variable should be used.
+ if not "B10_FROM_SOURCE" in os.environ:
+ cur_path = os.getenv('DYLD_LIBRARY_PATH')
+ cur_path = '' if cur_path is None else ':' + cur_path
+ c_channel_env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+
+ cur_path = os.getenv('LD_LIBRARY_PATH')
+ cur_path = '' if cur_path is None else ':' + cur_path
+ c_channel_env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
self.start_simple("b10-xfrin", c_channel_env)
def start_zonemgr(self, c_channel_env):
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 424a610..2efd940 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -137,9 +137,27 @@ class TestBoB(unittest.TestCase):
def group_sendmsg(self, msg, group):
(self.msg, self.group) = (msg, group)
def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Boss",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
bob = BoB()
bob.verbose = True
bob.cc_session = DummySession()
+ bob.ccs = DummyModuleCCSession()
# a bad command
self.assertEqual(bob.command_handler(-1, None),
isc.config.ccsession.create_answer(1, "bad command"))
@@ -150,8 +168,9 @@ class TestBoB(unittest.TestCase):
# "getstats" command
self.assertEqual(bob.command_handler("getstats", None),
isc.config.ccsession.create_answer(0,
- { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
}}))
# "sendstats" command
self.assertEqual(bob.command_handler("sendstats", None),
@@ -159,8 +178,9 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.cc_session.group, "Stats")
self.assertEqual(bob.cc_session.msg,
isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ "set", { "owner": "Boss",
+ "data": {
+ "boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", _BASETIME)
}}))
# "ping" command
self.assertEqual(bob.command_handler("ping", None),
diff --git a/src/bin/cfgmgr/plugins/Makefile.am b/src/bin/cfgmgr/plugins/Makefile.am
index 529a4ed..5a4cfef 100644
--- a/src/bin/cfgmgr/plugins/Makefile.am
+++ b/src/bin/cfgmgr/plugins/Makefile.am
@@ -1,11 +1,14 @@
SUBDIRS = tests
-EXTRA_DIST = README tsig_keys.py tsig_keys.spec
-EXTRA_DIST += logging.spec b10logging.py
+
+EXTRA_DIST = README logging.spec tsig_keys.spec
config_plugindir = @prefix@/share/@PACKAGE@/config_plugins
-config_plugin_DATA = tsig_keys.py tsig_keys.spec
-config_plugin_DATA += b10logging.py logging.spec
+config_plugin_DATA = logging.spec tsig_keys.spec
+
+python_PYTHON = b10logging.py tsig_keys.py
+pythondir = $(config_plugindir)
+CLEANFILES = b10logging.pyc tsig_keys.pyc
CLEANDIRS = __pycache__
clean-local:
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index 6665573..805d6bb 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -31,15 +31,20 @@ spec_config.h: spec_config.h.pre
BUILT_SOURCES = spec_config.h
pkglibexec_PROGRAMS = b10-dhcp6
-b10_dhcp6_SOURCES = main.cc iface_mgr.cc dhcp6_srv.cc
-b10_dhcp6_SOURCES += iface_mgr.h dhcp6_srv.h
-
-b10_dhcp6_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp.la
+b10_dhcp6_SOURCES = main.cc iface_mgr.cc pkt6.cc dhcp6_srv.cc
+b10_dhcp6_SOURCES += iface_mgr.h pkt6.h dhcp6_srv.h dhcp6.h
+b10_dhcp6_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libcc.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/liblog.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+b10_dhcp6_LDADD += $(SQLITE_LIBS)
# TODO: config.h.in is wrong because doesn't honor pkgdatadir
# and can't use @datadir@ because doesn't expand default ${prefix}
diff --git a/src/bin/dhcp6/dhcp6.h b/src/bin/dhcp6/dhcp6.h
new file mode 100644
index 0000000..b5512f3
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6.h
@@ -0,0 +1,184 @@
+// Copyright (C) 2006-2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCP6_H
+#define DHCP6_H
+
+/* DHCPv6 Option codes: */
+
+#define D6O_CLIENTID 1 /* RFC3315 */
+#define D6O_SERVERID 2
+#define D6O_IA_NA 3
+#define D6O_IA_TA 4
+#define D6O_IAADDR 5
+#define D6O_ORO 6
+#define D6O_PREFERENCE 7
+#define D6O_ELAPSED_TIME 8
+#define D6O_RELAY_MSG 9
+/* Option code 10 unassigned. */
+#define D6O_AUTH 11
+#define D6O_UNICAST 12
+#define D6O_STATUS_CODE 13
+#define D6O_RAPID_COMMIT 14
+#define D6O_USER_CLASS 15
+#define D6O_VENDOR_CLASS 16
+#define D6O_VENDOR_OPTS 17
+#define D6O_INTERFACE_ID 18
+#define D6O_RECONF_MSG 19
+#define D6O_RECONF_ACCEPT 20
+#define D6O_SIP_SERVERS_DNS 21 /* RFC3319 */
+#define D6O_SIP_SERVERS_ADDR 22 /* RFC3319 */
+#define D6O_NAME_SERVERS 23 /* RFC3646 */
+#define D6O_DOMAIN_SEARCH 24 /* RFC3646 */
+#define D6O_IA_PD 25 /* RFC3633 */
+#define D6O_IAPREFIX 26 /* RFC3633 */
+#define D6O_NIS_SERVERS 27 /* RFC3898 */
+#define D6O_NISP_SERVERS 28 /* RFC3898 */
+#define D6O_NIS_DOMAIN_NAME 29 /* RFC3898 */
+#define D6O_NISP_DOMAIN_NAME 30 /* RFC3898 */
+#define D6O_SNTP_SERVERS 31 /* RFC4075 */
+#define D6O_INFORMATION_REFRESH_TIME 32 /* RFC4242 */
+#define D6O_BCMCS_SERVER_D 33 /* RFC4280 */
+#define D6O_BCMCS_SERVER_A 34 /* RFC4280 */
+/* 35 is unassigned */
+#define D6O_GEOCONF_CIVIC 36 /* RFC4776 */
+#define D6O_REMOTE_ID 37 /* RFC4649 */
+#define D6O_SUBSCRIBER_ID 38 /* RFC4580 */
+#define D6O_CLIENT_FQDN 39 /* RFC4704 */
+#define D6O_PANA_AGENT 40 /* paa-option */
+#define D6O_NEW_POSIX_TIMEZONE 41 /* RFC4833 */
+#define D6O_NEW_TZDB_TIMEZONE 42 /* RFC4833 */
+#define D6O_ERO 43 /* RFC4994 */
+#define D6O_LQ_QUERY 44 /* RFC5007 */
+#define D6O_CLIENT_DATA 45 /* RFC5007 */
+#define D6O_CLT_TIME 46 /* RFC5007 */
+#define D6O_LQ_RELAY_DATA 47 /* RFC5007 */
+#define D6O_LQ_CLIENT_LINK 48 /* RFC5007 */
+
+/*
+ * Status Codes, from RFC 3315 section 24.4, and RFC 3633, 5007.
+ */
+#define STATUS_Success 0
+#define STATUS_UnspecFail 1
+#define STATUS_NoAddrsAvail 2
+#define STATUS_NoBinding 3
+#define STATUS_NotOnLink 4
+#define STATUS_UseMulticast 5
+#define STATUS_NoPrefixAvail 6
+#define STATUS_UnknownQueryType 7
+#define STATUS_MalformedQuery 8
+#define STATUS_NotConfigured 9
+#define STATUS_NotAllowed 10
+
+/*
+ * DHCPv6 message types, defined in section 5.3 of RFC 3315
+ */
+#define DHCPV6_SOLICIT 1
+#define DHCPV6_ADVERTISE 2
+#define DHCPV6_REQUEST 3
+#define DHCPV6_CONFIRM 4
+#define DHCPV6_RENEW 5
+#define DHCPV6_REBIND 6
+#define DHCPV6_REPLY 7
+#define DHCPV6_RELEASE 8
+#define DHCPV6_DECLINE 9
+#define DHCPV6_RECONFIGURE 10
+#define DHCPV6_INFORMATION_REQUEST 11
+#define DHCPV6_RELAY_FORW 12
+#define DHCPV6_RELAY_REPL 13
+#define DHCPV6_LEASEQUERY 14
+#define DHCPV6_LEASEQUERY_REPLY 15
+
+extern const char *dhcpv6_type_names[];
+extern const int dhcpv6_type_name_max;
+
+/* DUID type definitions (RFC3315 section 9).
+ */
+#define DUID_LLT 1
+#define DUID_EN 2
+#define DUID_LL 3
+
+/* Offsets into IA_*'s where Option spaces commence. */
+#define IA_NA_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
+#define IA_TA_OFFSET 4 /* IAID only, 4 octets */
+#define IA_PD_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
+
+/* Offset into IAADDR's where Option spaces commence. */
+#define IAADDR_OFFSET 24
+
+/* Offset into IAPREFIX's where Option spaces commence. */
+#define IAPREFIX_OFFSET 25
+
+/* Offset into LQ_QUERY's where Option spaces commence. */
+#define LQ_QUERY_OFFSET 17
+
+/*
+ * DHCPv6 well-known multicast addresses, from section 5.1 of RFC 3315
+ */
+#define ALL_DHCP_RELAY_AGENTS_AND_SERVERS "ff02::1:2"
+#define ALL_DHCP_SERVERS "ff05::1:3"
+
+#define DHCP6_CLIENT_PORT 546
+#define DHCP6_SERVER_PORT 547
+
+/*
+ * DHCPv6 Retransmission Constants (RFC3315 section 5.5, RFC 5007)
+ */
+
+#define SOL_MAX_DELAY 1
+#define SOL_TIMEOUT 1
+#define SOL_MAX_RT 120
+#define REQ_TIMEOUT 1
+#define REQ_MAX_RT 30
+#define REQ_MAX_RC 10
+#define CNF_MAX_DELAY 1
+#define CNF_TIMEOUT 1
+#define CNF_MAX_RT 4
+#define CNF_MAX_RD 10
+#define REN_TIMEOUT 10
+#define REN_MAX_RT 600
+#define REB_TIMEOUT 10
+#define REB_MAX_RT 600
+#define INF_MAX_DELAY 1
+#define INF_TIMEOUT 1
+#define INF_MAX_RT 120
+#define REL_TIMEOUT 1
+#define REL_MAX_RC 5
+#define DEC_TIMEOUT 1
+#define DEC_MAX_RC 5
+#define REC_TIMEOUT 2
+#define REC_MAX_RC 8
+#define HOP_COUNT_LIMIT 32
+#define LQ6_TIMEOUT 1
+#define LQ6_MAX_RT 10
+#define LQ6_MAX_RC 5
+
+/* Leasequery query-types (RFC 5007) */
+
+#define LQ6QT_BY_ADDRESS 1
+#define LQ6QT_BY_CLIENTID 2
+
+/*
+ * DUID time starts 2000-01-01.
+ * This constant is the number of seconds since 1970-01-01,
+ * when the Unix epoch began.
+ */
+#define DUID_TIME_EPOCH 946684800
+
+/* Information-Request Time option (RFC 4242) */
+
+#define IRT_DEFAULT 86400
+#define IRT_MINIMUM 600
+
+#endif
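
A quick way to double-check the DUID_TIME_EPOCH constant above (a throwaway
Python sketch, not part of the source tree):

    from datetime import datetime

    # Seconds from the Unix epoch (1970-01-01) to the DUID time epoch (2000-01-01).
    assert (datetime(2000, 1, 1) - datetime(1970, 1, 1)).total_seconds() == 946684800
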
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
index c15fecb..101b079 100644
--- a/src/bin/dhcp6/dhcp6_srv.cc
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -13,18 +13,13 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <config.h>
-#include <dhcp/dhcp6.h>
-#include <dhcp/pkt6.h>
-#include <dhcp6/iface_mgr.h>
-#include <dhcp6/dhcp6_srv.h>
-#include <dhcp/option6_ia.h>
-#include <dhcp/option6_iaaddr.h>
-#include <asiolink/io_address.h>
+
+#include "dhcp6/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+#include "dhcp6/dhcp6_srv.h"
using namespace std;
using namespace isc;
-using namespace isc::dhcp;
-using namespace isc::asiolink;
Dhcpv6Srv::Dhcpv6Srv() {
cout << "Initialization" << endl;
@@ -32,10 +27,6 @@ Dhcpv6Srv::Dhcpv6Srv() {
// first call to instance() will create IfaceMgr (it's a singleton)
// it may throw something if things go wrong
IfaceMgr::instance();
-
- if (!setServerID()) {
- isc_throw(Unexpected, "Failed to set up server-id.");
- }
}
Dhcpv6Srv::~Dhcpv6Srv() {
@@ -44,177 +35,27 @@ Dhcpv6Srv::~Dhcpv6Srv() {
bool
Dhcpv6Srv::run() {
- while (true) {
- boost::shared_ptr<Pkt6> query; // client's message
- boost::shared_ptr<Pkt6> rsp; // server's response
+ for (;;) {
+ Pkt6* pkt;
- query = IfaceMgr::instance().receive();
+ pkt = IfaceMgr::instance().receive();
- if (query) {
- if (!query->unpack()) {
- cout << "Failed to parse incoming packet" << endl;
- continue;
- }
- switch (query->getType()) {
- case DHCPV6_SOLICIT:
- rsp = processSolicit(query);
- break;
- case DHCPV6_REQUEST:
- rsp = processRequest(query);
- break;
- case DHCPV6_RENEW:
- rsp = processRenew(query);
- break;
- case DHCPV6_REBIND:
- rsp = processRebind(query);
- break;
- case DHCPV6_CONFIRM:
- rsp = processConfirm(query);
- break;
- case DHCPV6_RELEASE:
- rsp = processRelease(query);
- break;
- case DHCPV6_DECLINE:
- rsp = processDecline(query);
- break;
- default:
- cout << "Unknown pkt type received:"
- << query->getType() << endl;
- }
-
- cout << "Received " << query->data_len_ << " bytes packet type="
- << query->getType() << endl;
- cout << query->toText();
- if (rsp != boost::shared_ptr<Pkt6>()) {
- rsp->remote_addr_ = query->remote_addr_;
- rsp->local_addr_ = query->local_addr_;
- rsp->remote_port_ = DHCP6_CLIENT_PORT;
- rsp->local_port_ = DHCP6_SERVER_PORT;
- rsp->ifindex_ = query->ifindex_;
- rsp->iface_ = query->iface_;
- cout << "Replying with:" << rsp->getType() << endl;
- cout << rsp->toText();
- cout << "----" << endl;
- if (rsp->pack()) {
- cout << "#### pack successful." << endl;
- }
- IfaceMgr::instance().send(rsp);
- }
+ if (pkt) {
+ cout << "Received " << pkt->data_len_ << " bytes, echoing back."
+ << endl;
+ IfaceMgr::instance().send(*pkt);
+ delete pkt;
}
- // TODO add support for config session (see src/bin/auth/main.cc)
- // so this daemon can be controlled from bob
- }
-
- return (true);
-}
-
-boost::shared_ptr<Option>
-Dhcpv6Srv::getServerID() {
- return serverid_;
-}
-
-bool
-Dhcpv6Srv::setServerID() {
- /// TODO implement this for real once interface detection is done.
- /// Use hardcoded server-id for now
+ // TODO add support for config session (see src/bin/auth/main.cc)
+ // so this daemon can be controlled from bob
+#ifdef _WIN32
+ Sleep(1000);
+#else
+ sleep(1);
+#endif
- boost::shared_array<char> srvid(new char[14]);
- srvid[0] = 0;
- srvid[1] = 1; // DUID type 1 = DUID-LLT (see section 9.2 of RFC3315)
- srvid[2] = 0;
- srvid[3] = 6; // HW type = ethernet (I think. I'm typing this from my head
- // in hotel, without Internet connection)
- for (int i=4; i<14; i++) {
- srvid[i]=i-4;
}
- serverid_ = boost::shared_ptr<Option>(new Option(Option::V6,
- D6O_SERVERID,
- srvid,
- 0, 14));
- return (true);
-}
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processSolicit(boost::shared_ptr<Pkt6> solicit) {
-
- boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_ADVERTISE,
- solicit->getTransid(),
- Pkt6::UDP));
-
- // answer client's IA (this is mostly a dummy,
- // so let's answer only first IA and hope there is only one)
- boost::shared_ptr<Option> ia_opt = solicit->getOption(D6O_IA_NA);
- if (ia_opt) {
- // found IA
- Option * tmp = ia_opt.get();
- Option6IA * ia_req = dynamic_cast<Option6IA*> (tmp);
- if (ia_req) {
- boost::shared_ptr<Option6IA> ia_rsp(new Option6IA(Option::V6, D6O_IA_NA, ia_req->getIAID()));
- ia_rsp->setT1(1500);
- ia_rsp->setT2(2600);
- boost::shared_ptr<Option6IAAddr> addr(new Option6IAAddr(D6O_IAADDR, IOAddress("2001:db8:1234:5678::abcd"), 5000, 7000));
- ia_rsp->addOption(addr);
- reply->addOption(ia_rsp);
- }
- }
-
- // add client-id
- boost::shared_ptr<Option> clientid = solicit->getOption(D6O_CLIENTID);
- if (clientid) {
- reply->addOption(clientid);
- }
-
- // add server-id
- reply->addOption(getServerID());
- return reply;
-}
-
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processRequest(boost::shared_ptr<Pkt6> request) {
- /// TODO: Implement processRequest() for real
- boost::shared_ptr<Pkt6> reply = processSolicit(request);
- reply->setType(DHCPV6_REPLY);
- return reply;
-}
-
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processRenew(boost::shared_ptr<Pkt6> renew) {
- boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
- renew->getTransid(),
- Pkt6::UDP));
- return reply;
-}
-
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processRebind(boost::shared_ptr<Pkt6> rebind) {
- boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
- rebind->getTransid(),
- Pkt6::UDP));
- return reply;
-}
-
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processConfirm(boost::shared_ptr<Pkt6> confirm) {
- boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
- confirm->getTransid(),
- Pkt6::UDP));
- return reply;
-}
-
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processRelease(boost::shared_ptr<Pkt6> release) {
- boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
- release->getTransid(),
- Pkt6::UDP));
- return reply;
-}
-
-boost::shared_ptr<Pkt6>
-Dhcpv6Srv::processDecline(boost::shared_ptr<Pkt6> decline) {
- boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
- decline->getTransid(),
- Pkt6::UDP));
- return reply;
+ return (true);
}
-
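A sketch (not in the patch) of the calling convention this simplification introduces: receive() now returns a raw Pkt6* that the caller owns, and send() takes a reference instead of a boost::shared_ptr.

    // Caller-side view of the new IfaceMgr contract:
    Pkt6* pkt = IfaceMgr::instance().receive();   // may return 0
    if (pkt) {
        IfaceMgr::instance().send(*pkt);          // send(Pkt6&)
        delete pkt;                               // caller must free the packet
    }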
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
index 830132b..a02f5f6 100644
--- a/src/bin/dhcp6/dhcp6_srv.h
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -15,15 +15,8 @@
#ifndef DHCPV6_SRV_H
#define DHCPV6_SRV_H
-#include <boost/shared_ptr.hpp>
-#include <dhcp/pkt6.h>
-#include <dhcp/option.h>
#include <iostream>
-namespace test {
-class Dhcpv6SrvTest_Solicit_basic_Test;
-}
-
namespace isc {
class Dhcpv6Srv {
private:
@@ -32,14 +25,6 @@ namespace isc {
Dhcpv6Srv(const Dhcpv6Srv& src);
Dhcpv6Srv& operator=(const Dhcpv6Srv& src);
- boost::shared_ptr<isc::dhcp::Option> getServerID();
-
- // this method sets server-identifier. it loads it from a file or
- // generates using interface link-layer addresses (EUI-64)
- bool setServerID();
-
- boost::shared_ptr<isc::dhcp::Option> serverid_;
-
public:
// default constructor
Dhcpv6Srv();
@@ -48,30 +33,7 @@ namespace isc {
bool run();
protected:
- boost::shared_ptr<Pkt6>
- processSolicit(boost::shared_ptr<Pkt6> solicit);
-
- boost::shared_ptr<Pkt6>
- processRequest(boost::shared_ptr<Pkt6> solicit);
-
- boost::shared_ptr<Pkt6>
- processRenew(boost::shared_ptr<Pkt6> solicit);
-
- boost::shared_ptr<Pkt6>
- processRebind(boost::shared_ptr<Pkt6> solicit);
-
- boost::shared_ptr<Pkt6>
- processConfirm(boost::shared_ptr<Pkt6> solicit);
-
- boost::shared_ptr<Pkt6>
- processRelease(boost::shared_ptr<Pkt6> solicit);
-
- boost::shared_ptr<Pkt6>
- processDecline(boost::shared_ptr<Pkt6> solicit);
-
bool shutdown;
-
- friend class test::Dhcpv6SrvTest_Solicit_basic_Test;
};
};
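For orientation, a minimal sketch of a main() driving the trimmed-down class (the daemon's real main.cc is not part of this diff):

    #include "dhcp6/dhcp6_srv.h"

    int main() {
        isc::Dhcpv6Srv srv;   // constructor brings up the IfaceMgr singleton
        srv.run();            // currently the echo loop shown in dhcp6_srv.cc
        return (0);
    }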
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
index f7b273b..ca41440 100644
--- a/src/bin/dhcp6/iface_mgr.cc
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -21,12 +21,11 @@
#else
#include <netinet/in.h>
#include <arpa/inet.h>
-#include <net/if.h>
#endif
-#include <dhcp/dhcp6.h>
-#include <dhcp6/iface_mgr.h>
-#include <exceptions/exceptions.h>
+#include "dhcp6/iface_mgr.h"
+#include "dhcp6/dhcp6.h"
+#include "exceptions/exceptions.h"
using namespace std;
using namespace isc;
@@ -41,7 +40,7 @@ void
IfaceMgr::instanceCreate() {
if (instance_) {
// no need to do anything. Instance is already created.
- // Who called it again anyway? Uh oh. Had to be us, as
+ // Who called it again anyway? Uh oh. Had to be us, as
// this is private method.
return;
}
@@ -97,7 +96,7 @@ IfaceMgr::IfaceMgr() {
if (!openSockets()) {
isc_throw(Unexpected, "Failed to open/bind sockets.");
}
- } catch (std::exception& ex) {
+ } catch (const std::exception& ex) {
cout << "IfaceMgr creation failed:" << ex.what() << endl;
// TODO Uncomment this (or call LOG_FATAL) once
@@ -144,7 +143,7 @@ IfaceMgr::detectIfaces() {
iface.addrs_.push_back(addr);
ifaces_.push_back(iface);
interfaces.close();
- } catch (std::exception& ex) {
+ } catch (const std::exception& ex) {
// TODO: deallocate whatever memory we used
// not that important, since this function is going to be
// thrown away as soon as we get proper interface detection
@@ -181,7 +180,6 @@ IfaceMgr::openSockets() {
return (false);
}
#endif
-
sendsock_ = sock;
sock = openSocket(iface->name_,
@@ -249,7 +247,7 @@ IfaceMgr::getIface(const std::string& ifname) {
/**
- * Opens UDP/IPv6 socket and binds it to specific address, interface nad port.
+ * Opens UDP/IPv6 socket and binds it to specific address, interface and port.
*
* @param ifname name of the interface
* @param addr address to be bound.
@@ -263,27 +261,22 @@ int
IfaceMgr::openSocket(const std::string& ifname,
const IOAddress& addr,
int port) {
- struct sockaddr_storage name;
- int name_len;
- struct sockaddr_in6 *addr6;
+ struct sockaddr_in6 addr6;
cout << "Creating socket on " << ifname << "/" << addr.toText()
<< "/port=" << port << endl;
- memset(&name, 0, sizeof(name));
- addr6 = (struct sockaddr_in6 *)&name;
- addr6->sin6_family = AF_INET6;
- addr6->sin6_port = htons(port);
- addr6->sin6_scope_id = if_nametoindex(ifname.c_str());
+ memset(&addr6, 0, sizeof(addr6));
+ addr6.sin6_family = AF_INET6;
+ addr6.sin6_port = htons(port);
+ addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
- memcpy(&addr6->sin6_addr,
+ memcpy(&addr6.sin6_addr,
addr.getAddress().to_v6().to_bytes().data(),
- sizeof(addr6->sin6_addr));
-
+ sizeof(addr6.sin6_addr));
#ifdef HAVE_SA_LEN
- addr6->sin6_len = sizeof(*addr6);
+ addr6.sin6_len = sizeof(addr6);
#endif
- name_len = sizeof(*addr6);
// TODO: use sockcreator once it becomes available
@@ -300,6 +293,7 @@ IfaceMgr::openSocket(const std::string& ifname,
return (-1);
}
#endif
+
/* Set the REUSEADDR option so that we don't fail to start if
we're being restarted. */
int flag = 1;
@@ -316,22 +310,20 @@ IfaceMgr::openSocket(const std::string& ifname,
}
#ifdef _WIN32
- if (::bind(sock, (struct sockaddr *)&name, name_len) < 0) {
+ if (::bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
cout << "Failed to bind socket " << sock << " to " << addr.toText()
<< "/port=" << port << endl;
closesocket(sock);
return (INVALID_SOCKET);
}
#else
- if (bind(sock, (struct sockaddr *)&name, name_len) < 0) {
+ if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
cout << "Failed to bind socket " << sock << " to " << addr.toText()
<< "/port=" << port << endl;
close(sock);
return (-1);
}
#endif
-
-
#ifdef IPV6_RECVPKTINFO
/* RFC3542 - a new way */
if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
@@ -431,7 +423,7 @@ const std::string & mcast) {
* @return True, if transmission was successful. False otherwise.
*/
bool
-IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
+IfaceMgr::send(Pkt6 &pkt) {
#ifdef _WIN32
WSAMSG m;
WSABUF v;
@@ -459,17 +451,17 @@ IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
sockaddr_in6 to;
memset(&to, 0, sizeof(to));
to.sin6_family = AF_INET6;
- to.sin6_port = htons(pkt->remote_port_);
+ to.sin6_port = htons(pkt.remote_port_);
memcpy(&to.sin6_addr,
- pkt->remote_addr_.getAddress().to_v6().to_bytes().data(),
+ pkt.remote_addr_.getAddress().to_v6().to_bytes().data(),
16);
- to.sin6_scope_id = pkt->ifindex_;
+ to.sin6_scope_id = pkt.ifindex_;
#ifdef _WIN32
m.name = (struct sockaddr *)&to;
m.namelen = sizeof(to);
#else
- m.msg_name = (struct sockaddr *)&to;
+ m.msg_name = &to;
m.msg_namelen = sizeof(to);
#endif
@@ -479,13 +471,13 @@ IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
* of data to send, so we declare a single vector entry.)
*/
#ifdef _WIN32
- v.buf = (char *) &pkt->data_[0];
- v.len = pkt->data_len_;
+ v.buf = (char *) &pkt.data_[0];
+ v.len = pkt.data_len_;
m.lpBuffers = &v;
m.dwBufferCount = 1;
#else
- v.iov_base = (char *) &pkt->data_[0];
- v.iov_len = pkt->data_len_;
+ v.iov_base = (char *) &pkt.data_[0];
+ v.iov_len = pkt.data_len_;
m.msg_iov = &v;
m.msg_iovlen = 1;
#endif
@@ -507,7 +499,7 @@ IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
cmsg->cmsg_len = WSA_CMSG_LEN(sizeof(*pktinfo));
pktinfo = (struct in6_pktinfo *)WSA_CMSG_DATA(cmsg);
memset(pktinfo, 0, sizeof(*pktinfo));
- pktinfo->ipi6_ifindex = pkt->ifindex_;
+ pktinfo->ipi6_ifindex = pkt.ifindex_;
m.Control.len = cmsg->cmsg_len;
#else
m.msg_control = control_buf_;
@@ -518,7 +510,7 @@ IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
cmsg->cmsg_len = CMSG_LEN(sizeof(*pktinfo));
pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
memset(pktinfo, 0, sizeof(*pktinfo));
- pktinfo->ipi6_ifindex = pkt->ifindex_;
+ pktinfo->ipi6_ifindex = pkt.ifindex_;
m.msg_controllen = cmsg->cmsg_len;
#endif
@@ -536,10 +528,10 @@ IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
}
cout << "Sent " << result << " bytes." << endl;
- cout << "Sent " << pkt->data_len_ << " bytes over "
- << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
- << " dst=" << pkt->remote_addr_.toText()
- << ", src=" << pkt->local_addr_.toText()
+ cout << "Sent " << pkt.data_len_ << " bytes over "
+ << pkt.iface_ << "/" << pkt.ifindex_ << " interface: "
+ << " dst=" << pkt.remote_addr_.toText()
+ << ", src=" << pkt.local_addr_.toText()
<< endl;
return (result >= 0);
@@ -555,7 +547,7 @@ IfaceMgr::send(boost::shared_ptr<Pkt6> pkt) {
*
* @return Object representing the received packet.
*/
-boost::shared_ptr<Pkt6>
+Pkt6*
IfaceMgr::receive() {
#ifdef _WIN32
GUID WSARecvMsg_GUID = WSAID_WSARECVMSG;
@@ -570,12 +562,12 @@ IfaceMgr::receive() {
#ifdef _WIN32
WSACMSGHDR *cmsg;
#else
- struct cmsghdr *cmsg;
+ struct cmsghdr* cmsg;
#endif
struct in6_pktinfo* pktinfo;
struct sockaddr_in6 from;
struct in6_addr to_addr;
- boost::shared_ptr<Pkt6> pkt;
+ Pkt6* pkt;
char addr_str[INET6_ADDRSTRLEN];
try {
@@ -586,10 +578,10 @@ IfaceMgr::receive() {
// we use larger buffer. This buffer limit is checked
// during reception (see iov_len below), so we are
// safe
- pkt = boost::shared_ptr<Pkt6>(new Pkt6(65536));
- } catch (std::exception&) {
+ pkt = new Pkt6(65536);
+ } catch (const std::exception&) {
cout << "Failed to create new packet." << endl;
- return (boost::shared_ptr<Pkt6>()); // NULL
+ return (0);
}
memset(control_buf_, 0, control_buf_len_);
@@ -624,7 +616,7 @@ IfaceMgr::receive() {
m.lpBuffers = &v;
m.dwBufferCount = 1;
#else
- v.iov_base = (char *)&pkt->data_[0];
+ v.iov_base = (char*)&pkt->data_[0];
v.iov_len = pkt->data_len_;
m.msg_iov = &v;
m.msg_iovlen = 1;
@@ -695,16 +687,17 @@ IfaceMgr::receive() {
}
if (!found_pktinfo) {
cout << "Unable to find pktinfo" << endl;
- return (boost::shared_ptr<Pkt6>()); // NULL
+ delete pkt;
+ return (0);
}
} else {
cout << "Failed to receive data." << endl;
- return (boost::shared_ptr<Pkt6>()); // NULL
+ delete pkt;
+ return (0);
}
// That's ugly.
// TODO add IOAddress constructor that will take struct in6_addr*
- // TODO: there's from_bytes() method added in IOAddress. Use it!
#ifdef _WIN32
#define DECONST (void *)
#else
@@ -713,7 +706,6 @@ IfaceMgr::receive() {
inet_ntop(AF_INET6, DECONST &to_addr, addr_str,INET6_ADDRSTRLEN);
pkt->local_addr_ = IOAddress(string(addr_str));
- // TODO: there's from_bytes() method added in IOAddress. Use it!
inet_ntop(AF_INET6, DECONST &from.sin6_addr, addr_str, INET6_ADDRSTRLEN);
pkt->remote_addr_ = IOAddress(string(addr_str));
@@ -725,7 +717,8 @@ IfaceMgr::receive() {
} else {
cout << "Received packet over unknown interface (ifindex="
<< pkt->ifindex_ << ")." << endl;
- return (boost::shared_ptr<Pkt6>()); // NULL
+ delete pkt;
+ return (0);
}
pkt->data_len_ = result;
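The openSocket() change above drops the sockaddr_storage buffer and the pointer aliasing in favour of a sockaddr_in6 filled in directly. A minimal sketch of the same pattern, assuming a POSIX socket API (the real method additionally handles _WIN32, SO_REUSEADDR, multicast joins and IPV6_RECVPKTINFO); the function name is illustrative.

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <string.h>
    #include <unistd.h>

    int open_v6_socket(const char* ifname, const char* addr_txt, int port) {
        struct sockaddr_in6 addr6;
        memset(&addr6, 0, sizeof(addr6));
        addr6.sin6_family = AF_INET6;
        addr6.sin6_port = htons(port);
        addr6.sin6_scope_id = if_nametoindex(ifname);
        inet_pton(AF_INET6, addr_txt, &addr6.sin6_addr);

        int sock = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
        if (sock < 0) {
            return (-1);
        }
        if (bind(sock, (struct sockaddr*)&addr6, sizeof(addr6)) < 0) {
            close(sock);
            return (-1);
        }
        return (sock);
    }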
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
index 67ebefc..118dc33 100644
--- a/src/bin/dhcp6/iface_mgr.h
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -16,9 +16,8 @@
#define IFACE_MGR_H
#include <list>
-#include <boost/shared_ptr.hpp>
-#include <asiolink/io_address.h>
-#include <dhcp/pkt6.h>
+#include "asiolink/io_address.h"
+#include "dhcp6/pkt6.h"
namespace isc {
@@ -61,8 +60,8 @@ namespace isc {
void printIfaces(std::ostream& out = std::cout);
- bool send(boost::shared_ptr<Pkt6> pkt);
- boost::shared_ptr<Pkt6> receive();
+ bool send(Pkt6& pkt);
+ Pkt6* receive();
// don't use private, we need derived classes in tests
protected:
diff --git a/src/bin/dhcp6/pkt6.cc b/src/bin/dhcp6/pkt6.cc
new file mode 100644
index 0000000..0d8fa60
--- /dev/null
+++ b/src/bin/dhcp6/pkt6.cc
@@ -0,0 +1,47 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include "dhcp6/dhcp6.h"
+#include "dhcp6/pkt6.h"
+#include <iostream>
+
+namespace isc {
+
+///
+/// constructor
+///
+/// \param dataLen - length of the data to be allocated
+///
+Pkt6::Pkt6(int dataLen)
+ :local_addr_("::"),
+ remote_addr_("::") {
+ try {
+ data_ = boost::shared_array<char>(new char[dataLen]);
+ data_len_ = dataLen;
+ } catch (const std::exception&) {
+ // TODO move to LOG_FATAL()
+ // let's continue with empty pkt for now
+ std::cout << "Failed to allocate " << dataLen << " bytes."
+ << std::endl;
+ data_len_ = 0;
+ }
+}
+
+Pkt6::~Pkt6() {
+ // no need to delete anything; the shared_array will take care of data_
+}
+
+};
diff --git a/src/bin/dhcp6/pkt6.h b/src/bin/dhcp6/pkt6.h
new file mode 100644
index 0000000..c833a13
--- /dev/null
+++ b/src/bin/dhcp6/pkt6.h
@@ -0,0 +1,62 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef PKT6_H
+#define PKT6_H
+
+#include <iostream>
+#include <boost/shared_array.hpp>
+#include "asiolink/io_address.h"
+
+namespace isc {
+
+ class Pkt6 {
+ public:
+ Pkt6(int len);
+ ~Pkt6();
+
+ // XXX: probably need getter/setter wrappers
+ // and hide fields as protected
+ // buffer that holds the packet data. It is a shared_array because
+ // options may share a pointer to this buffer
+ boost::shared_array<char> data_;
+
+ // length of the data
+ int data_len_;
+
+ // local address (destination if receiving packet, source if sending packet)
+ isc::asiolink::IOAddress local_addr_;
+
+ // remote address (source if receiving packet, destination if sending packet)
+ isc::asiolink::IOAddress remote_addr_;
+
+ // name of the network interface the packet was received/to be sent over
+ std::string iface_;
+
+ // interface index (each network interface has a unique ifindex assigned;
+ // it is the functional equivalent of the name, but sometimes more useful,
+ // e.g. on systems that allow spaces in interface names (Windows))
+ int ifindex_;
+
+ // local TCP or UDP port
+ int local_port_;
+
+ // remote TCP or UDP port
+ int remote_port_;
+
+ // XXX: add *a lot* here
+ };
+}
+
+#endif
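A hedged usage sketch (not in the patch) showing how the public fields are meant to be filled before handing a packet to IfaceMgr::send(); address, port and interface values are placeholders, mirroring the sendReceive unit test.

    #include "dhcp6/dhcp6.h"
    #include "dhcp6/pkt6.h"
    #include "dhcp6/iface_mgr.h"

    void send_dummy_packet() {
        isc::Pkt6 pkt(128);                  // allocates a 128 byte buffer
        for (int i = 0; i < pkt.data_len_; i++) {
            pkt.data_[i] = i;                // dummy payload
        }
        pkt.remote_addr_ = isc::asiolink::IOAddress("::1");
        pkt.remote_port_ = DHCP6_CLIENT_PORT;
        pkt.iface_ = "lo";
        pkt.ifindex_ = 1;
        isc::IfaceMgr::instance().send(pkt);
    }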
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index dac3081..ae9d8e3 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -8,14 +8,14 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
@@ -29,9 +29,11 @@ AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
-AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/lib/testutils/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
+CLEANFILES = $(builddir)/interfaces.txt
+
AM_CXXFLAGS = $(B10_CXXFLAGS)
if USE_STATIC_LINK
@@ -43,9 +45,11 @@ if HAVE_GTEST
TESTS += dhcp6_unittests
-dhcp6_unittests_SOURCES = ../iface_mgr.h ../iface_mgr.cc
+dhcp6_unittests_SOURCES = ../pkt6.h ../pkt6.cc
+dhcp6_unittests_SOURCES += ../iface_mgr.h ../iface_mgr.cc
dhcp6_unittests_SOURCES += ../dhcp6_srv.h ../dhcp6_srv.cc
dhcp6_unittests_SOURCES += dhcp6_unittests.cc
+dhcp6_unittests_SOURCES += pkt6_unittest.cc
dhcp6_unittests_SOURCES += iface_mgr_unittest.cc
dhcp6_unittests_SOURCES += dhcp6_srv_unittest.cc
@@ -54,7 +58,6 @@ dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
dhcp6_unittests_LDADD = $(GTEST_LDADD)
dhcp6_unittests_LDADD += $(SQLITE_LIBS)
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index 1758bbc..8f74f86 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -23,18 +23,12 @@
#endif
#include <gtest/gtest.h>
-#include <dhcp/dhcp6.h>
-#include <dhcp6/dhcp6_srv.h>
-#include <dhcp/option6_ia.h>
+#include "dhcp6/dhcp6_srv.h"
using namespace std;
using namespace isc;
-using namespace isc::dhcp;
-
-// namespace has to be named, because friends are defined in Dhcpv6Srv class
-// Maybe it should be isc::test?
-namespace test {
+namespace {
class Dhcpv6SrvTest : public ::testing::Test {
public:
Dhcpv6SrvTest() {
@@ -42,7 +36,7 @@ public:
};
TEST_F(Dhcpv6SrvTest, basic) {
- // there's almost no code now. What's there provides echo capability
+ // there's almost no code now. What's there provides echo capability
// that is just a proof of concept and will be removed soon
// No need to thoroughly test it
@@ -54,47 +48,9 @@ TEST_F(Dhcpv6SrvTest, basic) {
EXPECT_NO_THROW( {
Dhcpv6Srv * srv = new Dhcpv6Srv();
- delete srv;
- });
-
-}
-
-TEST_F(Dhcpv6SrvTest,Solicit_basic) {
- Dhcpv6Srv * srv = 0;
- EXPECT_NO_THROW( srv = new Dhcpv6Srv(); );
-
- boost::shared_ptr<Pkt6> sol =
- boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
- 1234, Pkt6::UDP));
-
- boost::shared_ptr<Option6IA> ia(new Option6IA(Option::V6, D6O_IA_NA, 2345));
- ia->setT1(1501);
- ia->setT2(2601);
- sol->addOption(ia);
-
- // Let's not send address in solicit yet
- // boost::shared_ptr<Option6IAAddr> addr(new Option6IAAddr(D6O_IAADDR,
- // IOAddress("2001:db8:1234:ffff::ffff"), 5001, 7001));
- // ia->addOption(addr);
- // sol->addOption(ia);
-
- boost::shared_ptr<Pkt6> reply = srv->processSolicit(sol);
-
- // check if we get response at all
- ASSERT_TRUE( reply != boost::shared_ptr<Pkt6>() );
-
- EXPECT_EQ( DHCPV6_ADVERTISE, reply->getType() );
- EXPECT_EQ( 1234, reply->getTransid() );
-
- boost::shared_ptr<Option> tmp = reply->getOption(D6O_IA_NA);
- ASSERT_TRUE( tmp != boost::shared_ptr<Option>() );
-
- Option6IA * reply_ia = dynamic_cast<Option6IA*> ( tmp.get() );
- EXPECT_EQ( 2345, reply_ia->getIAID() );
-
- // more checks to be implemented
- delete srv;
-
+ delete srv;
+ });
+
}
}
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
index 61ec009..5ae1f5e 100644
--- a/src/bin/dhcp6/tests/dhcp6_test.py
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
index 8f3b7ca..31b20ba 100644
--- a/src/bin/dhcp6/tests/iface_mgr_unittest.cc
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -25,18 +25,17 @@
#endif
#include <gtest/gtest.h>
-#include <asiolink/io_address.h>
-#include <dhcp/pkt6.h>
-#include <dhcp6/iface_mgr.h>
+#include "asiolink/io_address.h"
+#include "dhcp6/pkt6.h"
+#include "dhcp6/iface_mgr.h"
using namespace std;
using namespace isc;
using namespace isc::asiolink;
-// name of loopback interface detection
-char LOOPBACK[32] = "lo";
-
namespace {
+const char* const INTERFACE_FILE = TEST_DATA_BUILDDIR "/interfaces.txt";
+
class NakedIfaceMgr: public IfaceMgr {
// "naked" Interface Manager, exposes internal fields
public:
@@ -60,124 +59,6 @@ public:
}
};
-// We need some known interface to work reliably. Loopback interface
-// is named lo on Linux and lo0 on BSD boxes. We need to find out
-// which is available. This is not a real test, but rather a workaround
-// that will go away when interface detection is implemented.
-TEST_F(IfaceMgrTest, loDetect) {
-
- unlink("interfaces.txt");
-
- ofstream interfaces("interfaces.txt", ios::ate);
- interfaces << "lo ::1";
- interfaces.close();
-
- NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
- IOAddress loAddr("::1");
- IOAddress mcastAddr("ff02::1:2");
-
- // bind multicast socket to port 10547
- int socket1 = ifacemgr->openSocket("lo", mcastAddr, 10547);
- // this fails on BSD (there's no lo interface there)
-
- // poor man's interface dection
- // it will go away as soon as proper interface detection
- // is implemented
-#ifdef _WIN32
- if (socket1 != INVALID_SOCKET) {
- cout << "This is Linux, using lo as loopback." << endl;
- closesocket(socket1);
- }
-#else
- if (socket1>0) {
- cout << "This is Linux, using lo as loopback." << endl;
- close(socket1);
- }
-#endif
- else {
- // this fails on Linux and succeeds on BSD
- socket1 = ifacemgr->openSocket("lo0", mcastAddr, 10547);
-#ifdef _WIN32
- if (socket1 != INVALID_SOCKET) {
- sprintf(LOOPBACK, "lo0");
- cout << "This is BSD, using lo0 as loopback." << endl;
- closesocket(socket1);
- }
-#else
- if (socket1>0) {
- sprintf(LOOPBACK, "lo0");
- cout << "This is BSD, using lo0 as loopback." << endl;
- close(socket1);
- }
-#endif
- else {
- cout << "Failed to detect loopback interface. Neither "
- << "lo or lo0 worked. I give up." << endl;
- ASSERT_TRUE(false);
- }
- }
-
- delete ifacemgr;
-}
-
-// uncomment this test to create packet writer. It will
-// write incoming DHCPv6 packets as C arrays. That is useful
-// for generating test sequences based on actual traffic
-//
-// TODO: this potentially should be moved to a separate tool
-//
-
-#if 0
-TEST_F(IfaceMgrTest, dhcp6Sniffer) {
- // testing socket operation in a portable way is tricky
- // without interface detection implemented
-
- unlink("interfaces.txt");
-
- ofstream interfaces("interfaces.txt", ios::ate);
- interfaces << "eth0 fe80::21e:8cff:fe9b:7349";
- interfaces.close();
-
- NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
-
- Pkt6 * pkt = 0;
- int cnt = 0;
- cout << "---8X-----------------------------------------" << endl;
- while (true) {
- pkt = ifacemgr->receive();
-
- cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
- cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
- cout << " Pkt6* pkt;" << endl;
- cout << " pkt = new Pkt6(" << pkt->data_len_ << ");" << endl;
- cout << " pkt->remote_port_ = " << pkt-> remote_port_ << ";" << endl;
- cout << " pkt->remote_addr_ = IOAddress(\""
- << pkt->remote_addr_.toText() << "\");" << endl;
- cout << " pkt->local_port_ = " << pkt-> local_port_ << ";" << endl;
- cout << " pkt->local_addr_ = IOAddress(\""
- << pkt->local_addr_.toText() << "\");" << endl;
- cout << " pkt->ifindex_ = " << pkt->ifindex_ << ";" << endl;
- cout << " pkt->iface_ = \"" << pkt->iface_ << "\";" << endl;
- for (int i=0; i< pkt->data_len_; i++) {
- cout << " pkt->data_[" << i << "]="
- << (int)(unsigned char)pkt->data_[i] << "; ";
- if (!(i%4))
- cout << endl;
- }
- cout << endl;
- cout << " return (pkt);" << endl;
- cout << "}" << endl << endl;
-
- delete pkt;
- }
- cout << "---8X-----------------------------------------" << endl;
-
- // never happens. Infinite loop is infinite
- delete pkt;
- delete ifacemgr;
-}
-#endif
-
TEST_F(IfaceMgrTest, basic) {
// checks that IfaceManager can be instantiated
@@ -196,35 +77,23 @@ TEST_F(IfaceMgrTest, ifaceClass) {
}
-// TODO: Implement getPlainMac() test as soon as interface detection
-// is implemented.
-
+// TODO: Implement getPlainMac() test as soon as interface detection is implemented.
TEST_F(IfaceMgrTest, getIface) {
cout << "Interface checks. Please ignore socket binding errors." << endl;
NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
// interface name, ifindex
- IfaceMgr::Iface iface1("lo1", 1);
+ IfaceMgr::Iface iface1("lo", 1);
IfaceMgr::Iface iface2("eth5", 2);
IfaceMgr::Iface iface3("en3", 5);
IfaceMgr::Iface iface4("e1000g0", 3);
- // note: real interfaces may be detected as well
ifacemgr->getIfacesLst().push_back(iface1);
ifacemgr->getIfacesLst().push_back(iface2);
ifacemgr->getIfacesLst().push_back(iface3);
ifacemgr->getIfacesLst().push_back(iface4);
- cout << "There are " << ifacemgr->getIfacesLst().size()
- << " interfaces." << endl;
- for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
- iface != ifacemgr->getIfacesLst().end();
- ++iface) {
- cout << " " << iface->name_ << "/" << iface->ifindex_ << endl;
- }
-
-
// check that interface can be retrieved by ifindex
IfaceMgr::Iface * tmp = ifacemgr->getIface(5);
// ASSERT_NE(NULL, tmp); is not supported. hmmmm.
@@ -234,14 +103,14 @@ TEST_F(IfaceMgrTest, getIface) {
EXPECT_EQ(5, tmp->ifindex_);
// check that interface can be retrieved by name
- tmp = ifacemgr->getIface("lo1");
+ tmp = ifacemgr->getIface("lo");
ASSERT_TRUE( tmp != NULL );
- EXPECT_STREQ( "lo1", tmp->name_.c_str() );
+ EXPECT_STREQ( "lo", tmp->name_.c_str() );
EXPECT_EQ(1, tmp->ifindex_);
// check that non-existing interfaces are not returned
- EXPECT_EQ(0, ifacemgr->getIface("wifi0") );
+ EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
delete ifacemgr;
}
@@ -251,7 +120,7 @@ TEST_F(IfaceMgrTest, detectIfaces) {
// test detects that interfaces can be detected
// there is no code for that now, but interfaces are
// read from file
- fstream fakeifaces("interfaces.txt", ios::out|ios::trunc);
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
fakeifaces << "eth0 fe80::1234";
fakeifaces.close();
@@ -276,7 +145,11 @@ TEST_F(IfaceMgrTest, detectIfaces) {
delete ifacemgr;
}
-TEST_F(IfaceMgrTest, sockets) {
+// TODO: disabled due to differing loopback interface names across systems
+// (lo on Linux, lo0 on BSD systems).
+// A fix for this is available on the 1186 branch; this test will be
+// re-enabled once 1186 is merged.
+TEST_F(IfaceMgrTest, DISABLED_sockets) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
@@ -285,15 +158,15 @@ TEST_F(IfaceMgrTest, sockets) {
IOAddress loAddr("::1");
// bind multicast socket to port 10547
- int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ int socket1 = ifacemgr->openSocket("lo", loAddr, 10547);
#ifdef _WIN32
- EXPECT_NE(socket1, INVALID_SOCKET); // socket != INVALID_SOCKET
+ EXPECT_NE(socket1, INVALID_SOCKET);
#else
EXPECT_GT(socket1, 0); // socket > 0
#endif
// bind unicast socket to port 10548
- int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
+ int socket2 = ifacemgr->openSocket("lo", loAddr, 10548);
#ifdef _WIN32
EXPECT_NE(socket2, INVALID_SOCKET);
#else
@@ -302,13 +175,12 @@ TEST_F(IfaceMgrTest, sockets) {
// expect success. This address/port is already bound, but
// we are using SO_REUSEADDR, so we can bind it twice
- int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-
- // rebinding succeeds on Linux, fails on BSD
- // TODO: add OS-specific defines here (or modify code to
- // behave the same way on all OSes, but that may not be
- // possible
- // EXPECT_GT(socket3, 0); // socket > 0
+ int socket3 = ifacemgr->openSocket("lo", loAddr, 10547);
+#ifdef _WIN32
+ EXPECT_NE(socket3, INVALID_SOCKET);
+#else
+ EXPECT_GT(socket3, 0); // socket > 0
+#endif
// we now have 3 sockets open at the same time. Looks good.
@@ -321,10 +193,13 @@ TEST_F(IfaceMgrTest, sockets) {
close(socket2);
close(socket3);
#endif
+
delete ifacemgr;
}
-TEST_F(IfaceMgrTest, socketsMcast) {
+// TODO: disabled due to differing loopback interface names across systems
+// (lo on Linux, lo0 on BSD systems).
+TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
@@ -334,18 +209,18 @@ TEST_F(IfaceMgrTest, socketsMcast) {
IOAddress mcastAddr("ff02::1:2");
// bind multicast socket to port 10547
- int socket1 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+ int socket1 = ifacemgr->openSocket("lo", mcastAddr, 10547);
#ifdef _WIN32
- EXPECT_NE(socket1, INVALID_SOCKET); // socket != INVALID_SOCKET
+ EXPECT_NE(socket1, INVALID_SOCKET);
#else
EXPECT_GT(socket1, 0); // socket > 0
#endif
// expect success. This address/port is already bound, but
// we are using SO_REUSEADDR, so we can bind it twice
- int socket2 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+ int socket2 = ifacemgr->openSocket("lo", mcastAddr, 10547);
#ifdef _WIN32
- EXPECT_NE(socket2, INVALID_SOCKET);
+ EXPECT_NE(socket2, INVALID_SOCKET);
#else
EXPECT_GT(socket2, 0);
#endif
@@ -366,37 +241,41 @@ TEST_F(IfaceMgrTest, socketsMcast) {
delete ifacemgr;
}
-TEST_F(IfaceMgrTest, sendReceive) {
+// TODO: disabled due to differing loopback interface names across systems
+// (lo on Linux, lo0 on BSD systems).
+// A fix for this is available on the 1186 branch; this test will be
+// re-enabled once 1186 is merged.
+TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
- fstream fakeifaces("interfaces.txt", ios::out|ios::trunc);
- fakeifaces << LOOPBACK << " ::1";
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << "lo ::1";
fakeifaces.close();
NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
// let's assume that every supported OS have lo interface
IOAddress loAddr("::1");
- int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
- int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+ int socket1 = ifacemgr->openSocket("lo", loAddr, 10547);
+ int socket2 = ifacemgr->openSocket("lo", loAddr, 10546);
ifacemgr->setSendSock(socket2);
ifacemgr->setRecvSock(socket1);
- boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
+ Pkt6 sendPkt(128);
// prepare dummy payload
for (int i=0;i<128; i++) {
- sendPkt->data_[i] = i;
+ sendPkt.data_[i] = i;
}
- sendPkt->remote_port_ = 10547;
- sendPkt->remote_addr_ = IOAddress("::1");
- sendPkt->ifindex_ = 1;
- sendPkt->iface_ = LOOPBACK;
+ sendPkt.remote_port_ = 10547;
+ sendPkt.remote_addr_ = IOAddress("::1");
+ sendPkt.ifindex_ = 1;
+ sendPkt.iface_ = "lo";
- boost::shared_ptr<Pkt6> rcvPkt;
+ Pkt6 * rcvPkt;
EXPECT_EQ(true, ifacemgr->send(sendPkt));
@@ -405,13 +284,15 @@ TEST_F(IfaceMgrTest, sendReceive) {
ASSERT_TRUE( rcvPkt != NULL ); // received our own packet
// let's check that we received what was sent
- EXPECT_EQ(sendPkt->data_len_, rcvPkt->data_len_);
- EXPECT_EQ(0, memcmp(&sendPkt->data_[0], &rcvPkt->data_[0],
+ EXPECT_EQ(sendPkt.data_len_, rcvPkt->data_len_);
+ EXPECT_EQ(0, memcmp(&sendPkt.data_[0], &rcvPkt->data_[0],
rcvPkt->data_len_) );
- EXPECT_EQ(sendPkt->remote_addr_, rcvPkt->remote_addr_);
+ EXPECT_EQ(sendPkt.remote_addr_.toText(), rcvPkt->remote_addr_.toText());
EXPECT_EQ(rcvPkt->remote_port_, 10546);
+ delete rcvPkt;
+
delete ifacemgr;
}
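The DISABLED_ cases above are skipped by default, but googletest can still run them on demand, which is handy while the loopback naming issue is being sorted out:

    ./dhcp6_unittests --gtest_also_run_disabled_tests --gtest_filter='IfaceMgrTest.*'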
diff --git a/src/bin/dhcp6/tests/pkt6_unittest.cc b/src/bin/dhcp6/tests/pkt6_unittest.cc
new file mode 100644
index 0000000..86c6cb2
--- /dev/null
+++ b/src/bin/dhcp6/tests/pkt6_unittest.cc
@@ -0,0 +1,48 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#else
+#include <arpa/inet.h>
+#endif
+#include <gtest/gtest.h>
+
+
+#include "dhcp6/pkt6.h"
+
+using namespace std;
+using namespace isc;
+
+namespace {
+// empty class for now, but may be extended once Pkt6 becomes bigger
+class Pkt6Test : public ::testing::Test {
+public:
+ Pkt6Test() {
+ }
+};
+
+TEST_F(Pkt6Test, constructor) {
+ Pkt6 * pkt1 = new Pkt6(17);
+
+ ASSERT_EQ(17, pkt1->data_len_);
+
+ delete pkt1;
+}
+
+}
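A possible follow-up case (not in this patch), only to show the fixture exercising the buffer the constructor allocates:

    TEST_F(Pkt6Test, payload) {
        Pkt6 pkt(4);
        ASSERT_EQ(4, pkt.data_len_);
        for (int i = 0; i < pkt.data_len_; i++) {
            pkt.data_[i] = i;        // buffer is caller-writable
        }
        EXPECT_EQ(3, pkt.data_[3]);
    }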
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 97a2ba6..12ddab3 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -60,6 +60,4 @@ run_unittests_CXXFLAGS += -Wno-unused-parameter
endif
endif
-
-
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index 3289765..63e2a3b 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,7 +5,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
+b10_stats_DATA = stats.spec stats-httpd.spec
b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
@@ -21,7 +21,7 @@ CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
man_MANS = b10-stats.8 b10-stats-httpd.8
EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
+EXTRA_DIST += stats.spec stats-httpd.spec
EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index ed4aafa..1206e1d 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -36,7 +36,7 @@ b10-stats-httpd \- BIND 10 HTTP server for HTTP/XML interface of statistics
.PP
\fBb10\-stats\-httpd\fR
-is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data from
+is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to serve requests from HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data or its schema from
\fBb10\-stats\fR, and it sends the data back in Python dictionary format and the server converts it into XML format\&. The server sends it to the HTTP client\&. The server can send three types of document, which are XML (Extensible Markup Language), XSD (XML Schema definition) and XSL (Extensible Stylesheet Language)\&. The XML document is the statistics data of BIND 10, The XSD document is the data schema of it, and The XSL document is the style sheet to be showed for the web browsers\&. There is different URL for each document\&. But please note that you would be redirected to the URL of XML document if you request the URL of the root document\&. For example, you would be redirected to http://127\&.0\&.0\&.1:8000/bind10/statistics/xml if you request http://127\&.0\&.0\&.1:8000/\&. Please see the manual and the spec file of
\fBb10\-stats\fR
for more details about the items of BIND 10 statistics\&. The server uses CC session in communication with
@@ -66,10 +66,6 @@ bindctl(1)\&. Please see the manual of
bindctl(1)
about how to configure the settings\&.
.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
-.PP
/usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
\(em the template file of XML document\&.
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 34c704f..c8df9b8 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -57,7 +57,7 @@
by the BIND 10 boss process (<command>bind10</command>) and eventually
exited by it. The server is intended to serve requests from HTTP
clients like web browsers and third-party modules. When the server is
- asked, it requests BIND 10 statistics data from
+ asked, it requests BIND 10 statistics data or its schema from
<command>b10-stats</command>, and it sends the data back in Python
dictionary format and the server converts it into XML format. The server
sends it to the HTTP client. The server can send three types of document,
@@ -112,12 +112,6 @@
of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
how to configure the settings.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
<para>
<filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -138,7 +132,7 @@
<refsect1>
<title>CONFIGURATION AND COMMANDS</title>
<para>
- The configurable setting in
+ The configurable setting in
<filename>stats-httpd.spec</filename> is:
</para>
<variablelist>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index 98b109b..0204ca1 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -135,10 +135,6 @@ See other manual pages for explanations for their statistics that are kept track
\fBb10\-stats\fR\&. It contains commands for
\fBb10\-stats\fR\&. They can be invoked via
bindctl(1)\&.
-.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
.SH "SEE ALSO"
.PP
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index 9709175..13ada7a 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -213,12 +213,6 @@
invoked
via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
</refsect1>
<refsect1>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index 01ffdc6..a1f6406 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -44,6 +44,7 @@ td.title {
<h1>BIND 10 Statistics</h1>
<table>
<tr>
+ <th>Owner</th>
<th>Title</th>
<th>Value</th>
</tr>
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
deleted file mode 100644
index 5252865..0000000
--- a/src/bin/stats/stats-schema.spec
+++ /dev/null
@@ -1,86 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Statistics data schema",
- "config_data": [
- {
- "item_name": "report_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Report time",
- "item_description": "A date time when stats module reports",
- "item_format": "date-time"
- },
- {
- "item_name": "bind10.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "bind10.BootTime",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.BootTime",
- "item_description": "A date time when the stats module starts initially or when the stats module restarts",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.start_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.StartTime",
- "item_description": "A date time when the stats module starts collecting data or resetting values last time",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.last_update_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.LastUpdateTime",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.timestamp",
- "item_type": "real",
- "item_optional": false,
- "item_default": 0.0,
- "item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
- },
- {
- "item_name": "stats.lname",
- "item_type": "string",
- "item_optional": false,
- "item_default": "",
- "item_title": "stats.LocalName",
- "item_description": "A localname of stats module given via CC protocol"
- },
- {
- "item_name": "auth.queries.tcp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.tcp",
- "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
- },
- {
- "item_name": "auth.queries.udp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.udp",
- "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
- }
- ],
- "commands": []
- }
-}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index afed544..da00818 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -15,16 +15,17 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+Statistics daemon in BIND 10
+
+"""
import sys; sys.path.append ('@@PYTHONPATH@@')
import os
-import signal
-import select
from time import time, strftime, gmtime
from optparse import OptionParser, OptionValueError
-from collections import defaultdict
-from isc.config.ccsession import ModuleCCSession, create_answer
-from isc.cc import Session, SessionError
+import isc
+import isc.util.process
import isc.log
from isc.log_messages.stats_messages import *
@@ -35,226 +36,157 @@ logger = isc.log.Logger("stats")
# have #1074
DBG_STATS_MESSAGING = 30
+# This is for boot_time of Stats
+_BASETIME = gmtime()
+
# for setproctitle
-import isc.util.process
isc.util.process.rename()
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats"
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
- BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
+ SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
+ SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
-class Singleton(type):
+def get_timestamp():
"""
- A abstract class of singleton pattern
+ get current timestamp
"""
- # Because of singleton pattern:
- # At the beginning of coding, one UNIX domain socket is needed
- # for config manager, another socket is needed for stats module,
- # then stats module might need two sockets. So I adopted the
- # singleton pattern because I avoid creating multiple sockets in
- # one stats module. But in the initial version stats module
- # reports only via bindctl, so just one socket is needed. To use
- # the singleton pattern is not important now. :(
+ return time()
- def __init__(self, *args, **kwargs):
- type.__init__(self, *args, **kwargs)
- self._instances = {}
+def get_datetime(gmt=None):
+ """
+ get current datetime
+ """
+ if not gmt: gmt = gmtime()
+ return strftime("%Y-%m-%dT%H:%M:%SZ", gmt)
- def __call__(self, *args, **kwargs):
- if args not in self._instances:
- self._instances[args]={}
- kw = tuple(kwargs.items())
- if kw not in self._instances[args]:
- self._instances[args][kw] = type.__call__(self, *args, **kwargs)
- return self._instances[args][kw]
+def get_spec_defaults(spec):
+ """
+ extracts the default values of the items from the spec given as
+ the argument, and returns a dict which maps the item names to
+ their default values
+ """
+ if type(spec) is not list: return {}
+ def _get_spec_defaults(spec):
+ item_type = spec['item_type']
+ if item_type == "integer":
+ return int(spec.get('item_default', 0))
+ elif item_type == "real":
+ return float(spec.get('item_default', 0.0))
+ elif item_type == "boolean":
+ return bool(spec.get('item_default', False))
+ elif item_type == "string":
+ return str(spec.get('item_default', ""))
+ elif item_type == "list":
+ return spec.get(
+ "item_default",
+ [ _get_spec_defaults(spec["list_item_spec"]) ])
+ elif item_type == "map":
+ return spec.get(
+ "item_default",
+ dict([ (s["item_name"], _get_spec_defaults(s)) for s in spec["map_item_spec"] ]) )
+ else:
+ return spec.get("item_default", None)
+ return dict([ (s['item_name'], _get_spec_defaults(s)) for s in spec ])
class Callback():
"""
A Callback handler class
"""
- def __init__(self, name=None, callback=None, args=(), kwargs={}):
- self.name = name
- self.callback = callback
+ def __init__(self, command=None, args=(), kwargs={}):
+ self.command = command
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
- if not args:
- args = self.args
- if not kwargs:
- kwargs = self.kwargs
- if self.callback:
- return self.callback(*args, **kwargs)
+ if not args: args = self.args
+ if not kwargs: kwargs = self.kwargs
+ if self.command: return self.command(*args, **kwargs)
-class Subject():
- """
- A abstract subject class of observer pattern
- """
- # Because of observer pattern:
- # In the initial release, I'm also sure that observer pattern
- # isn't definitely needed because the interface between gathering
- # and reporting statistics data is single. However in the future
- # release, the interfaces may be multiple, that is, multiple
- # listeners may be needed. For example, one interface, which
- # stats module has, is for between ''config manager'' and stats
- # module, another interface is for between ''HTTP server'' and
- # stats module, and one more interface is for between ''SNMP
- # server'' and stats module. So by considering that stats module
- # needs multiple interfaces in the future release, I adopted the
- # observer pattern in stats module. But I don't have concrete
- # ideas in case of multiple listener currently.
-
- def __init__(self):
- self._listeners = []
-
- def attach(self, listener):
- if not listener in self._listeners:
- self._listeners.append(listener)
-
- def detach(self, listener):
- try:
- self._listeners.remove(listener)
- except ValueError:
- pass
+class StatsError(Exception):
+ """Exception class for Stats class"""
+ pass
- def notify(self, event, modifier=None):
- for listener in self._listeners:
- if modifier != listener:
- listener.update(event)
-
-class Listener():
+class Stats:
"""
- A abstract listener class of observer pattern
+ Main class of stats module
"""
- def __init__(self, subject):
- self.subject = subject
- self.subject.attach(self)
- self.events = {}
-
- def update(self, name):
- if name in self.events:
- callback = self.events[name]
- return callback()
-
- def add_event(self, event):
- self.events[event.name]=event
-
-class SessionSubject(Subject, metaclass=Singleton):
- """
- A concrete subject class which creates CC session object
- """
- def __init__(self, session=None):
- Subject.__init__(self)
- self.session=session
- self.running = False
-
- def start(self):
- self.running = True
- self.notify('start')
-
- def stop(self):
+ def __init__(self):
self.running = False
- self.notify('stop')
-
- def check(self):
- self.notify('check')
-
-class CCSessionListener(Listener):
- """
- A concrete listener class which creates SessionSubject object and
- ModuleCCSession object
- """
- def __init__(self, subject):
- Listener.__init__(self, subject)
- self.session = subject.session
- self.boot_time = get_datetime()
-
# create ModuleCCSession object
- self.cc_session = ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- self.session)
-
- self.session = self.subject.session = self.cc_session._session
-
- # initialize internal data
- self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # add event handler invoked via SessionSubject object
- self.add_event(Callback('start', self.start))
- self.add_event(Callback('stop', self.stop))
- self.add_event(Callback('check', self.check))
- # don't add 'command_' suffix to the special commands in
- # order to prevent executing internal command via bindctl
-
+ self.mccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler)
+ self.cc_session = self.mccs._session
+ # get module spec
+ self.module_name = self.mccs.get_module_spec().get_module_name()
+ self.modules = {}
+ self.statistics_data = {}
# get commands spec
- self.commands_spec = self.cc_session.get_module_spec().get_commands_spec()
-
+ self.commands_spec = self.mccs.get_module_spec().get_commands_spec()
# add event handler related command_handler of ModuleCCSession
- # invoked via bindctl
+ self.callbacks = {}
for cmd in self.commands_spec:
+ # add prefix "command_"
+ name = "command_" + cmd["command_name"]
try:
- # add prefix "command_"
- name = "command_" + cmd["command_name"]
callback = getattr(self, name)
- kwargs = self.initialize_data(cmd["command_args"])
- self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
- except AttributeError as ae:
- logger.error(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
-
- def _update_stats_data(self, args):
- # 'args' must be dictionary type
- if isinstance(args, dict) and isinstance(args.get('stats_data'), dict):
- self.stats_data.update(args['stats_data'])
-
- # overwrite "stats.LastUpdateTime"
- self.stats_data['stats.last_update_time'] = get_datetime()
+ kwargs = get_spec_defaults(cmd["command_args"])
+ self.callbacks[name] = Callback(command=callback, kwargs=kwargs)
+ except AttributeError:
+ raise StatsError(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+ self.mccs.start()
def start(self):
"""
- start the cc chanel
+ Start stats module
"""
- # set initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
- self.cc_session.start()
+ self.running = True
+ logger.info(STATS_STARTING)
+
# request Bob to send statistics data
logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
cmd = isc.config.ccsession.create_command("getstats", None)
- seq = self.session.group_sendmsg(cmd, 'Boss')
+ seq = self.cc_session.group_sendmsg(cmd, 'Boss')
try:
- answer, env = self.session.group_recvmsg(False, seq)
+ answer, env = self.cc_session.group_recvmsg(False, seq)
if answer:
- rcode, arg = isc.config.ccsession.parse_answer(answer)
+ rcode, args = isc.config.ccsession.parse_answer(answer)
if rcode == 0:
- self._update_stats_data(arg)
+ errors = self.update_statistics_data(
+ args["owner"], **args["data"])
+ if errors:
+ raise StatsError("boss spec file is incorrect: "
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name,
+ last_update_time=get_datetime())
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
except isc.cc.session.SessionTimeout:
pass
- def stop(self):
- """
- stop the cc chanel
- """
- return self.cc_session.close()
+ # initialize statistics data
+ errors = self.update_statistics_data(
+ self.module_name,
+ lname=self.cc_session.lname,
+ boot_time=get_datetime(_BASETIME)
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
- def check(self):
- """
- check the cc chanel
- """
- return self.cc_session.check_command(False)
+ while self.running:
+ self.mccs.check_command(False)
def config_handler(self, new_config):
"""
@@ -262,169 +194,222 @@ class CCSessionListener(Listener):
"""
logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
new_config)
-
# do nothing currently
- return create_answer(0)
+ return isc.config.create_answer(0)
- def command_handler(self, command, *args, **kwargs):
+ def command_handler(self, command, kwargs):
"""
handle commands from the cc channel
"""
- # add 'command_' suffix in order to executing command via bindctl
name = 'command_' + command
-
- if name in self.events:
- event = self.events[name]
- return event(*args, **kwargs)
+ if name in self.callbacks:
+ callback = self.callbacks[name]
+ if kwargs:
+ return callback(**kwargs)
+ else:
+ return callback()
else:
- return self.command_unknown(command, args)
+ logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
+ return isc.config.create_answer(1, "Unknown command: '"+str(command)+"'")
- def command_shutdown(self, args):
+ def update_modules(self):
"""
- handle shutdown command
+ updates the information for each module. This method gets each
+ module's information from the config manager and stores it in
+ self.modules. If fetching it from the config manager fails, it
+ raises StatsError.
"""
- logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
- self.subject.running = False
- return create_answer(0)
+ modules = {}
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command(
+ isc.config.ccsession.COMMAND_GET_STATISTICS_SPEC),
+ 'ConfigManager')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ for mod in value:
+ spec = { "module_name" : mod }
+ if value[mod] and type(value[mod]) is list:
+ spec["statistics"] = value[mod]
+ modules[mod] = isc.config.module_spec.ModuleSpec(spec)
+ else:
+ raise StatsError("Updating module spec fails: " + str(value))
+ modules[self.module_name] = self.mccs.get_module_spec()
+ self.modules = modules
- def command_set(self, args, stats_data={}):
+ def get_statistics_data(self, owner=None, name=None):
"""
- handle set command
+ returns the statistics data that the stats module holds for each
+ module. If the specified statistics data cannot be found, it
+ raises StatsError.
"""
- self._update_stats_data(args)
- return create_answer(0)
+ self.update_statistics_data()
+ if owner and name:
+ try:
+ return self.statistics_data[owner][name]
+ except KeyError:
+ pass
+ elif owner:
+ try:
+ return self.statistics_data[owner]
+ except KeyError:
+ pass
+ elif name:
+ pass
+ else:
+ return self.statistics_data
+ raise StatsError("No statistics data found: "
+ + "owner: " + str(owner) + ", "
+ + "name: " + str(name))
- def command_remove(self, args, stats_item_name=''):
+ def update_statistics_data(self, owner=None, **data):
"""
- handle remove command
+ changes the statistics data of the specified module to the
+ specified data. It first updates the information for each module
+ and then updates the statistics data. If the specified data is
+ invalid for the statistics spec of the specified owner, it returns
+ a list of error messages. If there is no error, or if neither
+ owner nor data is specified, it returns None.
"""
-
- # 'args' must be dictionary type
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
-
- logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_REMOVE_COMMAND,
- stats_item_name)
-
- # just remove one item
- self.stats_data.pop(stats_item_name)
-
- return create_answer(0)
-
- def command_show(self, args, stats_item_name=''):
+ self.update_modules()
+ statistics_data = {}
+ for (name, module) in self.modules.items():
+ value = get_spec_defaults(module.get_statistics_spec())
+ if module.validate_statistics(True, value):
+ statistics_data[name] = value
+ for (name, value) in self.statistics_data.items():
+ if name in statistics_data:
+ statistics_data[name].update(value)
+ else:
+ statistics_data[name] = value
+ self.statistics_data = statistics_data
+ if owner and data:
+ errors = []
+ try:
+ if self.modules[owner].validate_statistics(False, data, errors):
+ self.statistics_data[owner].update(data)
+ return
+ except KeyError:
+ errors.append("unknown module name: " + str(owner))
+ return errors
+
+ def command_status(self):
"""
- handle show command
+ handle status command
"""
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
+ return isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")")
- # always overwrite 'report_time' and 'stats.timestamp'
- # if "show" command invoked
- self.stats_data['report_time'] = get_datetime()
- self.stats_data['stats.timestamp'] = get_timestamp()
-
- # if with args
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_SHOW_NAME_COMMAND,
- stats_item_name)
- return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
-
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_SHOW_ALL_COMMAND)
- return create_answer(0, self.stats_data)
-
- def command_reset(self, args):
+ def command_shutdown(self):
"""
- handle reset command
+ handle shutdown command
"""
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_RESET_COMMAND)
-
- # re-initialize internal variables
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # reset initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
-
- return create_answer(0)
+ logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
+ self.running = False
+ return isc.config.create_answer(0)
- def command_status(self, args):
+ def command_show(self, owner=None, name=None):
"""
- handle status command
+ handle show command
"""
- logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
- # just return "I'm alive."
- return create_answer(0, "I'm alive.")
-
- def command_unknown(self, command, args):
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_ALL_COMMAND)
+ errors = self.update_statistics_data(
+ self.module_name,
+ timestamp=get_timestamp(),
+ report_time=get_datetime()
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ try:
+ return isc.config.create_answer(
+ 0, self.get_statistics_data(owner, name))
+ except StatsError:
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
+
+ def command_showschema(self, owner=None, name=None):
"""
- handle an unknown command
+ handle showschema command
"""
- logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
- return create_answer(1, "Unknown command: '"+str(command)+"'")
-
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND)
+ self.update_modules()
+ schema = {}
+ schema_byname = {}
+ for mod in self.modules:
+ spec = self.modules[mod].get_statistics_spec()
+ schema_byname[mod] = {}
+ if spec:
+ schema[mod] = spec
+ for item in spec:
+ schema_byname[mod][item['item_name']] = item
+ if owner:
+ try:
+ if name:
+ return isc.config.create_answer(0, schema_byname[owner][name])
+ else:
+ return isc.config.create_answer(0, schema[owner])
+ except KeyError:
+ pass
+ else:
+ if name:
+ return isc.config.create_answer(1, "module name is not specified")
+ else:
+ return isc.config.create_answer(0, schema)
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
- def initialize_data(self, spec):
+ def command_set(self, owner, data):
"""
- initialize stats data
+ handle set command
"""
- def __get_init_val(spec):
- if spec['item_type'] == 'null':
- return None
- elif spec['item_type'] == 'boolean':
- return bool(spec.get('item_default', False))
- elif spec['item_type'] == 'string':
- return str(spec.get('item_default', ''))
- elif spec['item_type'] in set(['number', 'integer']):
- return int(spec.get('item_default', 0))
- elif spec['item_type'] in set(['float', 'double', 'real']):
- return float(spec.get('item_default', 0.0))
- elif spec['item_type'] in set(['list', 'array']):
- return spec.get('item_default',
- [ __get_init_val(s) for s in spec['list_item_spec'] ])
- elif spec['item_type'] in set(['map', 'object']):
- return spec.get('item_default',
- dict([ (s['item_name'], __get_init_val(s)) for s in spec['map_item_spec'] ]) )
- else:
- return spec.get('item_default')
- return dict([ (s['item_name'], __get_init_val(s)) for s in spec ])
+ errors = self.update_statistics_data(owner, **data)
+ if errors:
+ return isc.config.create_answer(
+ 1, "errors while setting statistics data: " \
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name, last_update_time=get_datetime() )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ return isc.config.create_answer(0)
-def get_timestamp():
- """
- get current timestamp
- """
- return time()
-
-def get_datetime():
- """
- get current datetime
- """
- return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
-
-def main(session=None):
+if __name__ == "__main__":
try:
parser = OptionParser()
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ parser.add_option(
+ "-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
(options, args) = parser.parse_args()
if options.verbose:
isc.log.init("b10-stats", "DEBUG", 99)
- subject = SessionSubject(session=session)
- listener = CCSessionListener(subject)
- subject.start()
- while subject.running:
- subject.check()
- subject.stop()
-
+ stats = Stats()
+ stats.start()
except OptionValueError as ove:
logger.fatal(STATS_BAD_OPTION_VALUE, ove)
- except SessionError as se:
+ sys.exit(1)
+ except isc.cc.session.SessionError as se:
logger.fatal(STATS_CC_SESSION_ERROR, se)
+ sys.exit(1)
+ except StatsError as se:
+ logger.fatal(STATS_START_ERROR, se)
+ sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATS_STOPPED_BY_KEYBOARD)
-
-if __name__ == "__main__":
- main()
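A rough, self-contained sketch of the "command_" dispatch pattern adopted by the new Stats class above; the ToyStats class and its commands spec are invented stand-ins, not BIND 10 code:

    # Toy reconstruction of the "command_" dispatch pattern; illustrative only.

    class Callback:
        """Wraps a callable plus default keyword arguments taken from the spec."""
        def __init__(self, command=None, args=(), kwargs={}):
            self.command = command
            self.args = args
            self.kwargs = kwargs
        def __call__(self, *args, **kwargs):
            if not args: args = self.args
            if not kwargs: kwargs = self.kwargs
            if self.command: return self.command(*args, **kwargs)

    class ToyStats:
        # stand-in for get_commands_spec(); the real entries come from stats.spec
        commands_spec = [{"command_name": "status", "command_args": []},
                         {"command_name": "shutdown", "command_args": []}]

        def __init__(self):
            self.callbacks = {}
            for cmd in self.commands_spec:
                name = "command_" + cmd["command_name"]
                # getattr() raises AttributeError if the spec names a command
                # with no matching command_* method (mapped to StatsError above)
                self.callbacks[name] = Callback(command=getattr(self, name))

        def command_handler(self, command, kwargs):
            name = "command_" + command
            if name in self.callbacks:
                cb = self.callbacks[name]
                return cb(**kwargs) if kwargs else cb()
            return (1, "Unknown command: '%s'" % command)

        def command_status(self):
            return (0, "Stats is up.")
        def command_shutdown(self):
            return (0, None)

    toy = ToyStats()
    print(toy.command_handler("status", None))   # (0, 'Stats is up.')
    print(toy.command_handler("nosuch", None))   # (1, "Unknown command: 'nosuch'")
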
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 635eb48..e716b62 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -6,55 +6,74 @@
"commands": [
{
"command_name": "status",
- "command_description": "identify whether stats module is alive or not",
+ "command_description": "Show status of the stats daemon",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats module",
"command_args": []
},
{
"command_name": "show",
- "command_description": "show the specified/all statistics data",
+ "command_description": "Show the specified/all statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
"item_type": "string",
"item_optional": true,
- "item_default": ""
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "set",
- "command_description": "set the value of specified name in statistics data",
+ "command_name": "showschema",
+ "command_description": "show the specified/all statistics shema",
"command_args": [
{
- "item_name": "stats_data",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": []
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "remove",
- "command_description": "remove the specified name from statistics data",
+ "command_name": "set",
+ "command_description": "set the value of specified name in statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
"item_type": "string",
"item_optional": false,
- "item_default": ""
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "data",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "item_description": "statistics data set of the owner",
+ "map_item_spec": []
}
]
- },
- {
- "command_name": "reset",
- "command_description": "reset all statistics data to default values except for several constant names",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats module",
- "command_args": []
}
],
"statistics": [
@@ -100,7 +119,7 @@
"item_default": "",
"item_title": "Local Name",
"item_description": "A localname of stats module given via CC protocol"
- }
+ }
]
}
}
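To illustrate the reworked argument layout above (owner/name for "show" and "showschema", owner plus a data map for "set"), a CC-channel client could build the command messages roughly as follows; the argument values are invented and the session handling is elided:

    # Illustrative only; values are made up and the session setup is omitted.
    import isc.config.ccsession

    # "show" / "showschema" take a module name (owner) and an item name
    show_cmd = isc.config.ccsession.create_command(
        'show', {'owner': 'Auth', 'name': 'queries.tcp'})

    # "set" takes the owning module plus a map of statistics values
    set_cmd = isc.config.ccsession.create_command(
        'set', {'owner': 'Boss', 'data': {'boot_time': '2011-03-04T11:59:06Z'}})

    # Sending mirrors the calls used elsewhere in this branch, e.g.:
    #   seq = cc_session.group_sendmsg(show_cmd, 'Stats')
    #   answer, env = cc_session.group_recvmsg(False, seq)
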
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100755
new mode 100644
index 6be6adf..596870a
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -57,7 +57,6 @@ else:
BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -69,7 +68,6 @@ XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
-DEFAULT_CONFIG = dict(listen_on=[('127.0.0.1', 8000)])
# Assign this process name
isc.util.process.rename()
@@ -160,8 +158,10 @@ class StatsHttpd:
self.mccs = None
self.httpd = []
self.open_mccs()
+ self.config = {}
self.load_config()
- self.load_templates()
+ self.http_addrs = []
+ self.mccs.start()
self.open_httpd()
def open_mccs(self):
@@ -171,10 +171,6 @@ class StatsHttpd:
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
- # read spec file of stats module and subscribe 'Stats'
- self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
- self.stats_config_spec = self.stats_module_spec.get_config_spec()
- self.stats_module_name = self.stats_module_spec.get_module_name()
def close_mccs(self):
"""Closes a ModuleCCSession object"""
@@ -189,18 +185,19 @@ class StatsHttpd:
"""Loads configuration from spec file or new configuration
from the config manager"""
# load config
- if len(new_config) > 0:
- self.config.update(new_config)
- else:
- self.config = DEFAULT_CONFIG
- self.config.update(
- dict([
- (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
- for itm in self.mccs.get_module_spec().get_config_spec()
- ])
- )
+ if len(self.config) == 0:
+ self.config = dict([
+ (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
+ for itm in self.mccs.get_module_spec().get_config_spec()
+ ])
+ self.config.update(new_config)
# set addresses and ports for HTTP
- self.http_addrs = [ (cf['address'], cf['port']) for cf in self.config['listen_on'] ]
+ addrs = []
+ if 'listen_on' in self.config:
+ for cf in self.config['listen_on']:
+ if 'address' in cf and 'port' in cf:
+ addrs.append((cf['address'], cf['port']))
+ self.http_addrs = addrs
def open_httpd(self):
"""Opens sockets for HTTP. Iterating each HTTP address to be
@@ -208,46 +205,44 @@ class StatsHttpd:
for addr in self.http_addrs:
self.httpd.append(self._open_httpd(addr))
- def _open_httpd(self, server_address, address_family=None):
+ def _open_httpd(self, server_address):
+ httpd = None
try:
- # try IPv6 at first
- if address_family is not None:
- HttpServer.address_family = address_family
- elif socket.has_ipv6:
- HttpServer.address_family = socket.AF_INET6
+ # get address family for the server_address before
+ # creating HttpServer object. If a specified address is
+ # not numerical, gaierror may be thrown.
+ address_family = socket.getaddrinfo(
+ server_address[0], server_address[1], 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST
+ )[0][0]
+ HttpServer.address_family = address_family
httpd = HttpServer(
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
self.write_log)
- except (socket.gaierror, socket.error,
- OverflowError, TypeError) as err:
- # try IPv4 next
- if HttpServer.address_family == socket.AF_INET6:
- httpd = self._open_httpd(server_address, socket.AF_INET)
- else:
- raise HttpServerError(
- "Invalid address %s, port %s: %s: %s" %
- (server_address[0], server_address[1],
- err.__class__.__name__, err))
- else:
logger.info(STATHTTPD_STARTED, server_address[0],
server_address[1])
- return httpd
+ return httpd
+ except (socket.gaierror, socket.error,
+ OverflowError, TypeError) as err:
+ if httpd:
+ httpd.server_close()
+ raise HttpServerError(
+ "Invalid address %s, port %s: %s: %s" %
+ (server_address[0], server_address[1],
+ err.__class__.__name__, err))
def close_httpd(self):
"""Closes sockets for HTTP"""
- if len(self.httpd) == 0:
- return
- for ht in self.httpd:
+ while len(self.httpd)>0:
+ ht = self.httpd.pop()
logger.info(STATHTTPD_CLOSING, ht.server_address[0],
ht.server_address[1])
ht.server_close()
- self.httpd = []
def start(self):
"""Starts StatsHttpd objects to run. Waiting for client
requests by using select.select functions"""
- self.mccs.start()
self.running = True
while self.running:
try:
@@ -280,6 +275,7 @@ class StatsHttpd:
logger.info(STATHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
+ self.running = False
def get_sockets(self):
"""Returns sockets to select.select"""
@@ -296,23 +292,27 @@ class StatsHttpd:
addresses and ports to listen HTTP requests on."""
logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
new_config)
- for key in new_config.keys():
- if key not in DEFAULT_CONFIG and key != "version":
- logger.error(STATHTTPD_UNKNOWN_CONFIG_ITEM, key)
+ errors = []
+ if not self.mccs.get_module_spec().\
+ validate_config(False, new_config, errors):
return isc.config.ccsession.create_answer(
- 1, "Unknown known config: %s" % key)
+ 1, ", ".join(errors))
# backup old config
old_config = self.config.copy()
- self.close_httpd()
self.load_config(new_config)
+ # If the http sockets aren't opened yet, or
+ # if new_config doesn't have 'listen_on', it returns immediately
+ if len(self.httpd) == 0 or 'listen_on' not in new_config:
+ return isc.config.ccsession.create_answer(0)
+ self.close_httpd()
try:
self.open_httpd()
except HttpServerError as err:
logger.error(STATHTTPD_SERVER_ERROR, err)
# restore old config
- self.config_handler(old_config)
- return isc.config.ccsession.create_answer(
- 1, "[b10-stats-httpd] %s" % err)
+ self.load_config(old_config)
+ self.open_httpd()
+ return isc.config.ccsession.create_answer(1, str(err))
else:
return isc.config.ccsession.create_answer(0)
@@ -328,8 +328,7 @@ class StatsHttpd:
logger.debug(DBG_STATHTTPD_MESSAGING,
STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
- return isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down.")
+ return isc.config.ccsession.create_answer(0)
else:
logger.debug(DBG_STATHTTPD_MESSAGING,
STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
@@ -341,8 +340,7 @@ class StatsHttpd:
the data which obtains from it"""
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'),
- self.stats_module_name)
+ isc.config.ccsession.create_command('show'), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -357,34 +355,82 @@ class StatsHttpd:
raise StatsHttpdError("Stats module: %s" % str(value))
def get_stats_spec(self):
- """Just returns spec data"""
- return self.stats_config_spec
-
- def load_templates(self):
- """Setup the bodies of XSD and XSL documents to be responds to
- HTTP clients. Before that it also creates XML tag structures by
- using xml.etree.ElementTree.Element class and substitutes
- concrete strings with parameters embed in the string.Template
- object."""
+ """Requests statistics data to the Stats daemon and returns
+ the data which obtains from it"""
+ try:
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command('showschema'), 'Stats')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ return value
+ else:
+ raise StatsHttpdError("Stats module: %s" % str(value))
+ except (isc.cc.session.SessionTimeout,
+ isc.cc.session.SessionError) as err:
+ raise StatsHttpdError("%s: %s" %
+ (err.__class__.__name__, err))
+
+ def xml_handler(self):
+ """Handler which requests to Stats daemon to obtain statistics
+ data and returns the body of XML document"""
+ xml_list=[]
+ for (mod, spec) in self.get_stats_data().items():
+ if not spec: continue
+ elem1 = xml.etree.ElementTree.Element(str(mod))
+ for (k, v) in spec.items():
+ elem2 = xml.etree.ElementTree.Element(str(k))
+ elem2.text = str(v)
+ elem1.append(elem2)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ xml_list.append(
+ str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
+ encoding='us-ascii'))
+ xml_string = "".join(xml_list)
+ self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
+ xml_string=xml_string,
+ xsd_namespace=XSD_NAMESPACE,
+ xsd_url_path=XSD_URL_PATH,
+ xsl_url_path=XSL_URL_PATH)
+ assert self.xml_body is not None
+ return self.xml_body
+
+ def xsd_handler(self):
+ """Handler which just returns the body of XSD document"""
# for XSD
xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for item in self.get_stats_spec():
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- xsd_root.append(element)
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ alltag = xml.etree.ElementTree.Element("all")
+ for item in spec:
+ element = xml.etree.ElementTree.Element(
+ "element",
+ dict( name=item["item_name"],
+ type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
+ minOccurs="1",
+ maxOccurs="1" ),
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ appinfo.text = item["item_title"]
+ documentation.text = item["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ element.append(annotation)
+ alltag.append(element)
+
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
+ mod_element.append(complextype)
+ xsd_root.append(mod_element)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -398,25 +444,33 @@ class StatsHttpd:
xsd_namespace=XSD_NAMESPACE
)
assert self.xsd_body is not None
+ return self.xsd_body
+ def xsl_handler(self):
+ """Handler which just returns the body of XSL document"""
# for XSL
xsd_root = xml.etree.ElementTree.Element(
"xsl:template",
dict(match="*")) # started with xml:template tag
- for item in self.get_stats_spec():
- tr = xml.etree.ElementTree.Element("tr")
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ for item in spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td0 = xml.etree.ElementTree.Element("td")
+ td0.text = str(mod)
+ td1 = xml.etree.ElementTree.Element(
+ "td", { "class" : "title",
+ "title" : item["item_description"] })
+ td1.text = item["item_title"]
+ td2 = xml.etree.ElementTree.Element("td")
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ dict(select=mod+'/'+item["item_name"]))
+ td2.append(xsl_valueof)
+ tr.append(td0)
+ tr.append(td1)
+ tr.append(td2)
+ xsd_root.append(tr)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -429,47 +483,15 @@ class StatsHttpd:
xsl_string=xsl_string,
xsd_namespace=XSD_NAMESPACE)
assert self.xsl_body is not None
-
- def xml_handler(self):
- """Handler which requests to Stats daemon to obtain statistics
- data and returns the body of XML document"""
- xml_list=[]
- for (k, v) in self.get_stats_data().items():
- (k, v) = (str(k), str(v))
- elem = xml.etree.ElementTree.Element(k)
- elem.text = v
- # The coding conversion is tricky. xml..tostring() of Python 3.2
- # returns bytes (not string) regardless of the coding, while
- # tostring() of Python 3.1 returns a string. To support both
- # cases transparently, we first make sure tostring() returns
- # bytes by specifying utf-8 and then convert the result to a
- # plain string (code below assume it).
- xml_list.append(
- str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
- encoding='us-ascii'))
- xml_string = "".join(xml_list)
- self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
- xml_string=xml_string,
- xsd_namespace=XSD_NAMESPACE,
- xsd_url_path=XSD_URL_PATH,
- xsl_url_path=XSL_URL_PATH)
- assert self.xml_body is not None
- return self.xml_body
-
- def xsd_handler(self):
- """Handler which just returns the body of XSD document"""
- return self.xsd_body
-
- def xsl_handler(self):
- """Handler which just returns the body of XSL document"""
return self.xsl_body
def open_template(self, file_name):
"""It opens a template file, and it loads all lines to a
string variable and returns string. Template object includes
the variable. Limitation of a file size isn't needed there."""
- lines = "".join(
- open(file_name, 'r').readlines())
+ f = open(file_name, 'r')
+ lines = "".join(f.readlines())
+ f.close()
assert lines is not None
return string.Template(lines)
@@ -491,7 +513,7 @@ if __name__ == "__main__":
logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
sys.exit(1)
except HttpServerError as hse:
- logger.fatal(STATHTTPD_START_SERVER_ERROR, hse)
+ logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
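The rewritten _open_httpd() above derives the address family from the configured address itself via getaddrinfo() with AI_NUMERICHOST instead of trying IPv6 first and falling back to IPv4, so non-numeric host names are rejected up front. A standalone sketch of that lookup, using only the standard socket module (the addresses are examples):

    import socket

    def address_family_of(addr, port):
        """Return the address family for a numeric address string; a
        non-numeric host name makes getaddrinfo() raise socket.gaierror."""
        info = socket.getaddrinfo(addr, port, 0, socket.SOCK_STREAM,
                                  socket.IPPROTO_TCP, socket.AI_NUMERICHOST)
        return info[0][0]

    print(address_family_of('127.0.0.1', 8000) == socket.AF_INET)   # True
    # the next line assumes the platform has IPv6 support compiled in
    print(address_family_of('::1', 8000) == socket.AF_INET6)        # True
    try:
        address_family_of('my.host.domain', 8000)
    except socket.gaierror as err:
        print("non-numeric address rejected:", err)
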
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
index 9ad07cf..cfffb3a 100644
--- a/src/bin/stats/stats_messages.mes
+++ b/src/bin/stats/stats_messages.mes
@@ -28,16 +28,6 @@ control bus. A likely problem is that the message bus daemon
This debug message is printed when the stats module has received a
configuration update from the configuration manager.
-% STATS_RECEIVED_REMOVE_COMMAND received command to remove %1
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-
-% STATS_RECEIVED_RESET_COMMAND received command to reset all statistics
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
-
% STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
The stats module received a command to show all statistics that it has
collected.
@@ -72,4 +62,15 @@ installation problem, where the specification file stats.spec is
from a different version of BIND 10 than the stats module itself.
Please check your installation.
+% STATS_STARTING starting
+The stats module is now starting.
+
+% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema
+The stats module received a command to show all statistics schemas of all modules.
+
+% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1
+The stats module received a command to show the specified statistics schema of the specified module.
+
+% STATS_START_ERROR stats module error: %1
+An internal error occurred while starting the stats module. The stats
+module is now shutting down.
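The new entries above follow the usual .mes convention: the token after '%' is the message identifier, the rest of the line is the text, and %1, %2, ... are filled from the arguments given to the logger (for example logger.fatal(STATS_START_ERROR, se) in stats.py.in). The toy renderer below only demonstrates that placeholder convention and is not the BIND 10 logging API:

    # Toy illustration of %N placeholder substitution; not the real logger.
    MESSAGES = {
        "STATS_START_ERROR": "stats module error: %1",
        "STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND":
            "received command to show statistics schema for %1",
    }

    def render(msgid, *args):
        text = MESSAGES[msgid]
        # replace %1, %2, ... with the corresponding positional arguments
        for i, arg in enumerate(args, start=1):
            text = text.replace("%" + str(i), str(arg))
        return "%s %s" % (msgid, text)

    print(render("STATS_START_ERROR", "boss spec file is incorrect"))
    # STATS_START_ERROR stats module error: boss spec file is incorrect
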
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index ee79de2..b5edc59 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,8 +1,7 @@
-SUBDIRS = isc http testdata
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
-EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
-CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+EXTRA_DIST = $(PYTESTS) test_utils.py
+CLEANFILES = test_utils.pyc
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
@@ -14,15 +13,16 @@ endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests:$(abs_top_builddir)/src/bin/msgq:$(abs_top_builddir)/src/lib/python/isc/config \
B10_FROM_SOURCE=$(abs_top_srcdir) \
+ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 6d72dc2..e867080 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -13,147 +13,269 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+In each of these tests we start several virtual components. They are
+not the real components; no external processes are started. They are
+just simple mock objects, each running in its own thread and pretending
+to be bind10 modules. This helps test the stats HTTP server in a
+close-to-real environment.
+"""
+
import unittest
import os
-import http.server
-import string
-import fake_select
import imp
-import sys
-import fake_socket
-
-import isc.cc
+import socket
+import errno
+import select
+import string
+import time
+import threading
+import http.client
+import xml.etree.ElementTree
+import random
+import isc
import stats_httpd
-stats_httpd.socket = fake_socket
-stats_httpd.select = fake_select
+import stats
+from test_utils import BaseModules, ThreadingServerManager, MyStats, MyStatsHttpd, SignalHandler, send_command, send_shutdown
DUMMY_DATA = {
- "auth.queries.tcp": 10000,
- "auth.queries.udp": 12000,
- "bind10.boot_time": "2011-03-04T11:59:05Z",
- "report_time": "2011-03-04T11:59:19Z",
- "stats.boot_time": "2011-03-04T11:59:06Z",
- "stats.last_update_time": "2011-03-04T11:59:07Z",
- "stats.lname": "4d70d40a_c at host",
- "stats.start_time": "2011-03-04T11:59:06Z",
- "stats.timestamp": 1299239959.560846
+ 'Boss' : {
+ "boot_time": "2011-03-04T11:59:06Z"
+ },
+ 'Auth' : {
+ "queries.tcp": 2,
+ "queries.udp": 3
+ },
+ 'Stats' : {
+ "report_time": "2011-03-04T11:59:19Z",
+ "boot_time": "2011-03-04T11:59:06Z",
+ "last_update_time": "2011-03-04T11:59:07Z",
+ "lname": "4d70d40a_c at host",
+ "timestamp": 1299239959.560846
+ }
}
-def push_answer(stats_httpd):
- stats_httpd.cc_session.group_sendmsg(
- { 'result':
- [ 0, DUMMY_DATA ] }, "Stats")
-
-def pull_query(stats_httpd):
- (msg, env) = stats_httpd.cc_session.group_recvmsg()
- if 'result' in msg:
- (ret, arg) = isc.config.ccsession.parse_answer(msg)
- else:
- (ret, arg) = isc.config.ccsession.parse_command(msg)
- return (ret, arg, env)
+def get_availaddr(address='127.0.0.1', port=8001):
+ """returns a tuple of address and port which is available to
+ listen on the platform. The first argument is a address for
+ search. The second argument is a port for search. If a set of
+ address and port is failed on the search for the availability, the
+ port number is increased and it goes on the next trial until the
+ available set of address and port is looked up. If the port number
+ reaches over 65535, it may stop the search and raise a
+ OverflowError exception."""
+ while True:
+ for addr in socket.getaddrinfo(
+ address, port, 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP):
+ sock = socket.socket(addr[0], socket.SOCK_STREAM)
+ try:
+ sock.bind((address, port))
+ return (address, port)
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ # This address and port are already in use;
+ # try the next port number
+ port = port + 1
+
+def is_ipv6_enabled(address='::1', port=8001):
+ """checks IPv6 enabled on the platform. address for check is '::1'
+ and port for check is random number between 8001 and
+ 65535. Retrying is 3 times even if it fails. The built-in socket
+ module provides a 'has_ipv6' parameter, but it's not used here
+ because there may be a situation where the value is True on an
+ environment where the IPv6 config is disabled."""
+ for p in random.sample(range(port, 65535), 3):
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ sock.bind((address, p))
+ return True
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ return False
class TestHttpHandler(unittest.TestCase):
"""Tests for HttpHandler class"""
-
def setUp(self):
- self.stats_httpd = stats_httpd.StatsHttpd()
- self.assertTrue(type(self.stats_httpd.httpd) is list)
- self.httpd = self.stats_httpd.httpd
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ (self.address, self.port) = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+ self.stats_httpd = self.stats_httpd_server.server
+ self.stats_httpd_server.run()
+ self.client = http.client.HTTPConnection(self.address, self.port)
+ self.client._http_vsn_str = 'HTTP/1.0\n'
+ self.client.connect()
- def test_do_GET(self):
- for ht in self.httpd:
- self._test_do_GET(ht._handler)
+ def tearDown(self):
+ self.client.close()
+ self.stats_httpd_server.shutdown()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
- def _test_do_GET(self, handler):
+ def test_do_GET(self):
+ self.assertTrue(type(self.stats_httpd.httpd) is list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
# URL is '/bind10/statistics/xml'
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- handler.do_GET()
- (ret, arg, env) = pull_query(self.stats_httpd)
- self.assertEqual(ret, "show")
- self.assertIsNone(arg)
- self.assertTrue('group' in env)
- self.assertEqual(env['group'], 'Stats')
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_URL_PATH)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
- self.assertTrue(handler.response.body.find(str(v))>0)
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('stats_data') > 0)
+ for (k,v) in root.attrib.items():
+ if k.find('schemaLocation') > 0:
+ self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
+ for mod in DUMMY_DATA:
+ for (item, value) in DUMMY_DATA[mod].items():
+ self.assertIsNotNone(root.find(mod + '/' + item))
# URL is '/bind10/statitics/xsd'
- handler.path = stats_httpd.XSD_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
+ xsdpath = '/'.join(tags)
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ for elm in root.findall(xsdpath):
+ self.assertIsNotNone(elm.attrib['name'])
+ self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
# URL is '/bind10/statitics/xsl'
- handler.path = stats_httpd.XSL_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ xslpath = url_trans + 'template/' + url_xhtml + 'tr'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ for tr in root.findall(xslpath):
+ tds = tr.findall(url_xhtml + 'td')
+ self.assertIsNotNone(tds)
+ self.assertEqual(type(tds), list)
+ self.assertTrue(len(tds) > 2)
+ self.assertTrue(hasattr(tds[0], 'text'))
+ self.assertTrue(tds[0].text in DUMMY_DATA)
+ valueof = tds[2].find(url_trans + 'value-of')
+ self.assertIsNotNone(valueof)
+ self.assertTrue(hasattr(valueof, 'attrib'))
+ self.assertIsNotNone(valueof.attrib)
+ self.assertTrue('select' in valueof.attrib)
+ self.assertTrue(valueof.attrib['select'] in \
+ [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
# 302 redirect
- handler.path = '/'
- handler.headers = {'Host': 'my.host.domain'}
- handler.do_GET()
- self.assertEqual(handler.response.code, 302)
- self.assertEqual(handler.response.headers["Location"],
- "http://my.host.domain%s" % stats_httpd.XML_URL_PATH)
+ self.client._http_vsn_str = 'HTTP/1.1'
+ self.client.putrequest('GET', '/')
+ self.client.putheader('Host', self.address)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 302)
+ self.assertEqual(response.getheader('Location'),
+ "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
# 404 NotFound
- handler.path = '/path/to/foo/bar'
- handler.headers = {}
- handler.do_GET()
- self.assertEqual(handler.response.code, 404)
-
- # failure case(connection with Stats is down)
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = True
- handler.do_GET()
- self.stats_httpd.cc_session._socket._closed = False
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
-
- # failure case(Stats module returns err)
- handler.path = stats_httpd.XML_URL_PATH
- self.stats_httpd.cc_session.group_sendmsg(
- { 'result': [ 1, "I have an error." ] }, "Stats")
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = False
- handler.do_GET()
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+
+ def test_do_GET_failed1(self):
+ # checks status
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ # failure case (Stats is down)
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None)) # Stats is down
+ self.assertFalse(self.stats.running)
+ self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ def test_do_GET_failed2(self):
+ # failure case (Stats replies with an error)
+ self.stats.mccs.set_command_handler(
+ lambda cmd, args: \
+ isc.config.ccsession.create_answer(1, "I have an error.")
+ )
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
def test_do_HEAD(self):
- for ht in self.httpd:
- self._test_do_HEAD(ht._handler)
+ self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
- def _test_do_HEAD(self, handler):
- handler.path = '/path/to/foo/bar'
- handler.do_HEAD()
- self.assertEqual(handler.response.code, 404)
+ self.client.putrequest('HEAD', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
class TestHttpServerError(unittest.TestCase):
"""Tests for HttpServerError exception"""
-
def test_raises(self):
try:
raise stats_httpd.HttpServerError('Nothing')
@@ -162,17 +284,24 @@ class TestHttpServerError(unittest.TestCase):
class TestHttpServer(unittest.TestCase):
"""Tests for HttpServer class"""
+ def setUp(self):
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+
+ def tearDown(self):
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_httpserver(self):
- self.stats_httpd = stats_httpd.StatsHttpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
- self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
- self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
- self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
- self.assertEqual(ht.log_writer, self.stats_httpd.write_log)
- self.assertTrue(isinstance(ht._handler, stats_httpd.HttpHandler))
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertEqual(type(self.stats_httpd.httpd), list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ for httpd in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(httpd, stats_httpd.HttpServer))
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
@@ -187,132 +316,173 @@ class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
def setUp(self):
- fake_socket._CLOSED = False
- fake_socket.has_ipv6 = True
- self.stats_httpd = stats_httpd.StatsHttpd()
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats_server.run()
+ # checking IPv6 enabled on this platform
+ self.ipv6_enabled = is_ipv6_enabled()
def tearDown(self):
- self.stats_httpd.stop()
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_init(self):
- self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
- self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
- id(self.stats_httpd.mccs.get_socket()))
- for ht in self.stats_httpd.httpd:
- self.assertFalse(ht.socket._closed)
- self.assertEqual(ht.socket.fileno(), id(ht.socket))
- fake_socket._CLOSED = True
- self.assertRaises(isc.cc.session.SessionError,
- stats_httpd.StatsHttpd)
- fake_socket._CLOSED = False
+ server_address = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_address)
+ self.assertEqual(self.stats_httpd.running, False)
+ self.assertEqual(self.stats_httpd.poll_intval, 0.5)
+ self.assertNotEqual(len(self.stats_httpd.httpd), 0)
+ self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
+ self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
+ self.assertEqual(len(self.stats_httpd.config), 2)
+ self.assertTrue('listen_on' in self.stats_httpd.config)
+ self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
+ self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
+
+ def test_openclose_mccs(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.close_mccs()
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.stats_httpd.open_mccs()
+ self.assertIsNotNone(self.stats_httpd.mccs)
+ self.stats_httpd.mccs = None
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.assertEqual(self.stats_httpd.close_mccs(), None)
def test_mccs(self):
- self.stats_httpd.open_mccs()
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
self.assertTrue(
- isinstance(self.stats_httpd.mccs.get_socket(), fake_socket.socket))
+ isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
self.assertTrue(
isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
- self.assertTrue(
- isinstance(self.stats_httpd.stats_module_spec, isc.config.ModuleSpec))
- for cfg in self.stats_httpd.stats_config_spec:
- self.assertTrue('item_name' in cfg)
- self.assertTrue(cfg['item_name'] in DUMMY_DATA)
- self.assertTrue(len(self.stats_httpd.stats_config_spec), len(DUMMY_DATA))
-
- def test_load_config(self):
- self.stats_httpd.load_config()
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
+ statistics_spec = self.stats_httpd.get_stats_spec()
+ for mod in DUMMY_DATA:
+ self.assertTrue(mod in statistics_spec)
+ for cfg in statistics_spec[mod]:
+ self.assertTrue('item_name' in cfg)
+ self.assertTrue(cfg['item_name'] in DUMMY_DATA[mod])
+ self.assertTrue(len(statistics_spec[mod]), len(DUMMY_DATA[mod]))
+ self.stats_httpd.close_mccs()
+ self.assertIsNone(self.stats_httpd.mccs)
def test_httpd(self):
# dual stack (addresses is ipv4 and ipv6)
- fake_socket.has_ipv6 = True
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
- self.stats_httpd.http_addrs = [ ('::1', 8000), ('127.0.0.1', 8000) ]
- self.assertTrue(
- stats_httpd.HttpServer.address_family in set([fake_socket.AF_INET, fake_socket.AF_INET6]))
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ if self.ipv6_enabled:
+ server_addresses = (get_availaddr('::1'), get_availaddr())
+ self.stats_httpd = MyStatsHttpd(*server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+ self.assertTrue(isinstance(ht.socket, socket.socket))
# dual stack (address is ipv6)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.stats_httpd.open_httpd()
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr('::1')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # dual/single stack (address is ipv4)
+ server_addresses = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
- # dual stack (address is ipv4)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
+ # any address (IPv4)
+ server_addresses = get_availaddr(address='0.0.0.0')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack (force set ipv6 )
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.assertRaises(stats_httpd.HttpServerError,
- self.stats_httpd.open_httpd)
-
- # hostname
- self.stats_httpd.http_addrs = [ ('localhost', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- self.stats_httpd.http_addrs = [ ('my.host.domain', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # any address (IPv6)
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr(address='::')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # existent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+ get_availaddr(address='localhost'))
+
+ # nonexistent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
# overflow of port number
- self.stats_httpd.http_addrs = [ ('', 80000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+
# negative
- self.stats_httpd.http_addrs = [ ('', -8000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
- # alphabet
- self.stats_httpd.http_addrs = [ ('', 'ABCDE') ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
-
- def test_start(self):
- self.stats_httpd.cc_session.group_sendmsg(
- { 'command': [ "shutdown" ] }, "StatsHttpd")
- self.stats_httpd.start()
- self.stats_httpd = stats_httpd.StatsHttpd()
- self.assertRaises(
- fake_select.error, self.stats_httpd.start)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
- def test_stop(self):
- # success case
- fake_socket._CLOSED = False
- self.stats_httpd.stop()
+ # alphabet
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
+
+ # Address already in use
+ server_addresses = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
+ self.stats_httpd_server.run()
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
+ send_shutdown("StatsHttpd")
+
+ def test_running(self):
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd = self.stats_httpd_server.server
self.assertFalse(self.stats_httpd.running)
- self.assertIsNone(self.stats_httpd.mccs)
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.socket._closed)
- self.assertTrue(self.stats_httpd.cc_session._socket._closed)
+ self.stats_httpd_server.run()
+ self.assertEqual(send_command("status", "StatsHttpd"),
+ (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats_httpd.running)
+ self.assertEqual(send_shutdown("StatsHttpd"), (0, None))
+ self.assertFalse(self.stats_httpd.running)
+ self.stats_httpd_server.shutdown()
+
# failure case
- self.stats_httpd.cc_session._socket._closed = False
- self.stats_httpd.open_mccs()
- self.stats_httpd.cc_session._socket._closed = True
- self.stats_httpd.stop() # No excetion raises
- self.stats_httpd.cc_session._socket._closed = False
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.cc_session.close()
+ self.assertRaises(ValueError, self.stats_httpd.start)
+
+ def test_failure_with_a_select_error(self):
+ """checks select.error is raised if the exception except
+ errno.EINTR is raised while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error('dummy error')
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertRaises(select.error, self.stats_httpd.start)
+ stats_httpd.select.select = orig_select
+
+ def test_nofailure_with_errno_EINTR(self):
+ """checks no exception is raised if errno.EINTR is raised
+ while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error(errno.EINTR)
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd_server.run()
+ self.stats_httpd_server.shutdown()
+ stats_httpd.select.select = orig_select
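The two tests above patch stats_httpd.select.select with a function that raises, then restore the original in a save/patch/restore sequence. For readers unfamiliar with the pattern, a minimal self-contained sketch of the same idea follows; poll_once and SelectPatchExample are hypothetical names used only for illustration, not code from this branch.

    import errno
    import select
    import unittest

    def poll_once(read_fds):
        """Call select once; swallow EINTR, let any other error propagate."""
        try:
            select.select(read_fds, [], [], 0)
        except select.error as err:
            if err.args[0] != errno.EINTR:
                raise

    class SelectPatchExample(unittest.TestCase):
        def _patch_select(self, exc):
            self._orig_select = select.select       # keep the real function
            def fake_select(*args):
                raise exc
            select.select = fake_select

        def tearDown(self):
            select.select = self._orig_select       # always restore it

        def test_eintr_is_ignored(self):
            self._patch_select(select.error(errno.EINTR))
            poll_once([])                           # must not raise

        def test_other_errors_propagate(self):
            self._patch_select(select.error(errno.EBADF, 'bad fd'))
            self.assertRaises(select.error, poll_once, [])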
def test_open_template(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
# successful conditions
tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
@@ -346,13 +516,13 @@ class TestStatsHttpd(unittest.TestCase):
self.stats_httpd.open_template, '/path/to/foo/bar')
def test_commands(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(self.stats_httpd.command_handler("status", None),
isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
self.stats_httpd.running = True
self.assertEqual(self.stats_httpd.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down."))
+ isc.config.ccsession.create_answer(0))
self.assertFalse(self.stats_httpd.running)
self.assertEqual(
self.stats_httpd.command_handler("__UNKNOWN_COMMAND__", None),
@@ -360,42 +530,48 @@ class TestStatsHttpd(unittest.TestCase):
1, "Unknown command: __UNKNOWN_COMMAND__"))
def test_config(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(
self.stats_httpd.config_handler(dict(_UNKNOWN_KEY_=None)),
isc.config.ccsession.create_answer(
- 1, "Unknown known config: _UNKNOWN_KEY_"))
- self.assertEqual(
- self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::2",port=8000)])),
- isc.config.ccsession.create_answer(0))
- self.assertTrue("listen_on" in self.stats_httpd.config)
- for addr in self.stats_httpd.config["listen_on"]:
- self.assertTrue("address" in addr)
- self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::2")
- self.assertTrue(addr["port"] == 8000)
+ 1, "unknown item _UNKNOWN_KEY_"))
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::1",port=80)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::1")
- self.assertTrue(addr["port"] == 80)
-
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ if self.ipv6_enabled:
+ addresses = get_availaddr("::1")
+ self.assertEqual(
+ self.stats_httpd.config_handler(
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
+ isc.config.ccsession.create_answer(0))
+ self.assertTrue("listen_on" in self.stats_httpd.config)
+ for addr in self.stats_httpd.config["listen_on"]:
+ self.assertTrue("address" in addr)
+ self.assertTrue("port" in addr)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="1.2.3.4",port=54321)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "1.2.3.4")
- self.assertTrue(addr["port"] == 54321)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
(ret, arg) = isc.config.ccsession.parse_answer(
self.stats_httpd.config_handler(
dict(listen_on=[dict(address="1.2.3.4",port=543210)]))
@@ -403,93 +579,103 @@ class TestStatsHttpd(unittest.TestCase):
self.assertEqual(ret, 1)
def test_xml_handler(self):
- orig_get_stats_data = stats_httpd.StatsHttpd.get_stats_data
- stats_httpd.StatsHttpd.get_stats_data = lambda x: {'foo':'bar'}
- xml_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : { 'foo':'bar' } }
+ xml_body1 = self.stats_httpd.open_template(
stats_httpd.XML_TEMPLATE_LOCATION).substitute(
- xml_string='<foo>bar</foo>',
+ xml_string='<Dummy><foo>bar</foo></Dummy>',
xsd_namespace=stats_httpd.XSD_NAMESPACE,
xsd_url_path=stats_httpd.XSD_URL_PATH,
xsl_url_path=stats_httpd.XSL_URL_PATH)
- xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ xml_body2 = self.stats_httpd.xml_handler()
self.assertEqual(type(xml_body1), str)
self.assertEqual(type(xml_body2), str)
self.assertEqual(xml_body1, xml_body2)
- stats_httpd.StatsHttpd.get_stats_data = lambda x: {'bar':'foo'}
- xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : {'bar':'foo'} }
+ xml_body2 = self.stats_httpd.xml_handler()
self.assertNotEqual(xml_body1, xml_body2)
- stats_httpd.StatsHttpd.get_stats_data = orig_get_stats_data
def test_xsd_handler(self):
- orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "foo",
- "item_type": "string",
- "item_optional": False,
- "item_default": "bar",
- "item_description": "foo is bar",
- "item_title": "Foo"
- }]
- xsd_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsd_body1 = self.stats_httpd.open_template(
stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
- xsd_string='<all>' \
+ xsd_string=\
+ '<all><element name="Dummy"><complexType><all>' \
+ '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
+ '<annotation><appinfo>Foo</appinfo>' \
+ '<documentation>foo is bar</documentation>' \
- + '</annotation></element></all>',
+ + '</annotation></element></all>' \
+ + '</complexType></element></all>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
- xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ xsd_body2 = self.stats_httpd.xsd_handler()
self.assertEqual(type(xsd_body1), str)
self.assertEqual(type(xsd_body2), str)
self.assertEqual(xsd_body1, xsd_body2)
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "bar",
- "item_type": "string",
- "item_optional": False,
- "item_default": "foo",
- "item_description": "bar is foo",
- "item_title": "bar"
- }]
- xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsd_body2 = self.stats_httpd.xsd_handler()
self.assertNotEqual(xsd_body1, xsd_body2)
- stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
def test_xsl_handler(self):
- orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "foo",
- "item_type": "string",
- "item_optional": False,
- "item_default": "bar",
- "item_description": "foo is bar",
- "item_title": "Foo"
- }]
- xsl_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsl_body1 = self.stats_httpd.open_template(
stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
xsl_string='<xsl:template match="*"><tr>' \
+ + '<td>Dummy</td>' \
+ '<td class="title" title="foo is bar">Foo</td>' \
- + '<td><xsl:value-of select="foo" /></td>' \
+ + '<td><xsl:value-of select="Dummy/foo" /></td>' \
+ '</tr></xsl:template>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
- xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ xsl_body2 = self.stats_httpd.xsl_handler()
self.assertEqual(type(xsl_body1), str)
self.assertEqual(type(xsl_body2), str)
self.assertEqual(xsl_body1, xsl_body2)
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "bar",
- "item_type": "string",
- "item_optional": False,
- "item_default": "foo",
- "item_description": "bar is foo",
- "item_title": "bar"
- }]
- xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsl_body2 = self.stats_httpd.xsl_handler()
self.assertNotEqual(xsl_body1, xsl_body2)
- stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
def test_for_without_B10_FROM_SOURCE(self):
# just lets it go through the code without B10_FROM_SOURCE env
@@ -500,8 +686,6 @@ class TestStatsHttpd(unittest.TestCase):
imp.reload(stats_httpd)
os.environ["B10_FROM_SOURCE"] = tmppath
imp.reload(stats_httpd)
- stats_httpd.socket = fake_socket
- stats_httpd.select = fake_select
if __name__ == "__main__":
unittest.main()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index 2fb4ab5..3813c7e 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -13,650 +13,593 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Tests for the stats module
-#
+"""
+In each of these tests we start several virtual components. They are
+not the real components; no external processes are started. They are
+just simple mock objects, each running in its own thread and pretending
+to be a bind10 module. This helps test the stats module in a
+close-to-real environment.
+"""
+
+import unittest
import os
-import sys
+import threading
+import io
import time
-import unittest
import imp
-from isc.cc.session import Session, SessionError
-from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
-from fake_time import time, strftime, gmtime
-import stats
-stats.time = time
-stats.strftime = strftime
-stats.gmtime = gmtime
-from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
-from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-
-if "B10_FROM_SOURCE" in os.environ:
- TEST_SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
- "/src/bin/stats/tests/testdata/stats_test.spec"
-else:
- TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
-class TestStats(unittest.TestCase):
+import stats
+import isc.cc.session
+from test_utils import BaseModules, ThreadingServerManager, MyStats, SignalHandler, send_command, send_shutdown
+
+class TestUtilties(unittest.TestCase):
+ items = [
+ { 'item_name': 'test_int1', 'item_type': 'integer', 'item_default': 12345 },
+ { 'item_name': 'test_real1', 'item_type': 'real', 'item_default': 12345.6789 },
+ { 'item_name': 'test_bool1', 'item_type': 'boolean', 'item_default': True },
+ { 'item_name': 'test_str1', 'item_type': 'string', 'item_default': 'ABCD' },
+ { 'item_name': 'test_list1', 'item_type': 'list', 'item_default': [1,2,3],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map1', 'item_type': 'map', 'item_default': {'a':1,'b':2,'c':3},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'integer'},
+ { 'item_name': 'b', 'item_type': 'integer'},
+ { 'item_name': 'c', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_int2', 'item_type': 'integer' },
+ { 'item_name': 'test_real2', 'item_type': 'real' },
+ { 'item_name': 'test_bool2', 'item_type': 'boolean' },
+ { 'item_name': 'test_str2', 'item_type': 'string' },
+ { 'item_name': 'test_list2', 'item_type': 'list',
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map2', 'item_type': 'map',
+ 'map_item_spec' : [ { 'item_name': 'A', 'item_type': 'integer'},
+ { 'item_name': 'B', 'item_type': 'integer'},
+ { 'item_name': 'C', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_none', 'item_type': 'none' },
+ { 'item_name': 'test_list3', 'item_type': 'list', 'item_default': ["one","two","three"],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'string' } },
+ { 'item_name': 'test_map3', 'item_type': 'map', 'item_default': {'a':'one','b':'two','c':'three'},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'string'},
+ { 'item_name': 'b', 'item_type': 'string'},
+ { 'item_name': 'c', 'item_type': 'string'} ] }
+ ]
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session)
- self.listener = CCSessionListener(self.subject)
- self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- self.stats_data = {
- 'report_time' : get_datetime(),
- 'bind10.boot_time' : "1970-01-01T00:00:00Z",
- 'stats.timestamp' : get_timestamp(),
- 'stats.lname' : self.session.lname,
- 'auth.queries.tcp': 0,
- 'auth.queries.udp': 0,
- "stats.boot_time": get_datetime(),
- "stats.start_time": get_datetime(),
- "stats.last_update_time": get_datetime()
- }
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertEqual(len(self.session.old_message_queue), 1)
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
-
- def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
- self.session.close()
-
- def test_local_func(self):
- """
- Test for local function
-
- """
- # test for result_ok
- self.assertEqual(type(result_ok()), dict)
- self.assertEqual(result_ok(), {'result': [0]})
- self.assertEqual(result_ok(1), {'result': [1]})
- self.assertEqual(result_ok(0,'OK'), {'result': [0, 'OK']})
- self.assertEqual(result_ok(1,'Not good'), {'result': [1, 'Not good']})
- self.assertEqual(result_ok(None,"It's None"), {'result': [None, "It's None"]})
- self.assertNotEqual(result_ok(), {'RESULT': [0]})
-
- # test for get_timestamp
- self.assertEqual(get_timestamp(), _TEST_TIME_SECS)
-
- # test for get_datetime
- self.assertEqual(get_datetime(), _TEST_TIME_STRF)
-
- def test_show_command(self):
- """
- Test for show command
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command with arg
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.lname"}]}, "Stats")
- self.assertEqual(len(self.subject.session.message_queue), 1)
- self.subject.check()
- result_data = self.subject.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'stats.lname': self.stats_data['stats.lname']}),
- result_data)
- self.assertEqual(len(self.subject.session.message_queue), 0)
-
- # test show command with arg which has wrong name
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.dummy"}]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_set_command(self):
- """
- Test for set command
-
- """
- # test set command
- self.stats_data['auth.queries.udp'] = 54321
- self.assertEqual(self.stats_data['auth.queries.udp'], 54321)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.udp': 54321 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2
- self.stats_data['auth.queries.udp'] = 0
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [ "set", {'stats_data': {'auth.queries.udp': 0}} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 2
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 54322
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 54322)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.tcp': 54322 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 3
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_remove_command(self):
- """
- Test for remove command
-
- """
- self.session.group_sendmsg({"command":
- [ "remove", {"stats_item_name": 'bind10.boot_time' }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.stats_data.pop('bind10.boot_time'), "1970-01-01T00:00:00Z")
- self.assertFalse('bind10.boot_time' in self.stats_data)
-
- # test show command with arg
- self.session.group_sendmsg({"command":
- [ "show", {"stats_item_name": 'bind10.boot_time'}]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertFalse('bind10.boot_time' in result_data['result'][1])
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_reset_command(self):
- """
- Test for reset command
-
- """
- self.session.group_sendmsg({"command": [ "reset" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show" ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_status_command(self):
- """
- Test for status command
-
- """
- self.session.group_sendmsg({"command": [ "status" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(0, "I'm alive."),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_unknown_command(self):
- """
- Test for unknown command
-
- """
- self.session.group_sendmsg({"command": [ "hoge", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(1, "Unknown command: 'hoge'"),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_shutdown_command(self):
- """
- Test for shutdown command
-
- """
- self.session.group_sendmsg({"command": [ "shutdown", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.assertTrue(self.subject.running)
- self.subject.check()
- self.assertFalse(self.subject.running)
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
+ self.const_timestamp = 1308730448.965706
+ self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ stats.time = lambda : self.const_timestamp
+ stats.gmtime = lambda : self.const_timetuple
- def test_some_commands(self):
- """
- Test for some commands in a row
-
- """
- # test set command
- self.stats_data['bind10.boot_time'] = '2010-08-02T14:47:56Z'
- self.assertEqual(self.stats_data['bind10.boot_time'], '2010-08-02T14:47:56Z')
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'bind10.boot_time': '2010-08-02T14:47:56Z' }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'bind10.boot_time' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'bind10.boot_time': '2010-08-02T14:47:56Z'}),
- result_data)
- self.assertEqual(result_ok(0, {'bind10.boot_time': self.stats_data['bind10.boot_time']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2nd
- self.stats_data['auth.queries.udp'] = 98765
- self.assertEqual(self.stats_data['auth.queries.udp'], 98765)
- self.session.group_sendmsg({ "command": [
- "set", { 'stats_data': {
- 'auth.queries.udp':
- self.stats_data['auth.queries.udp']
- } }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({"command": [
- "show", {'stats_item_name': 'auth.queries.udp'}
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 4321
- self.session.group_sendmsg({"command": [
- "set",
- {'stats_data': {'auth.queries.tcp': 4321 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check value
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.tcp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': 4321}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': self.stats_data['auth.queries.tcp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.udp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 4
- self.stats_data['auth.queries.tcp'] = 67890
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'auth.queries.tcp': 67890 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command for all values
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands2(self):
- """
- Test for some commands in a row using list-type value
-
- """
- self.stats_data['listtype'] = [1, 2, 3]
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({ "command": [
- "set", {'stats_data': {'listtype': [1, 2, 3] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype'}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [1, 2, 3]}),
- result_data)
- self.assertEqual(result_ok(0, {'listtype': self.stats_data['listtype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'listtype': [3, 2, 1, 0] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [3, 2, 1, 0]}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands3(self):
- """
- Test for some commands in a row using dictionary-type value
-
- """
- self.stats_data['dicttype'] = {"a": 1, "b": 2, "c": 3}
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'dicttype': {"a": 1, "b": 2, "c": 3} }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' } ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 1, "b": 2, "c": 3}}),
- result_data)
- self.assertEqual(result_ok(0, {'dicttype': self.stats_data['dicttype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' }]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_config_update(self):
- """
- Test for config update
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "config_update", {"x-version":999} ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
-
- def test_for_boss(self):
- last_queue = self.session.old_message_queue.pop()
- self.assertEqual(
- last_queue.msg, {'command': ['getstats']})
+ def test_get_spec_defaults(self):
self.assertEqual(
- last_queue.env['group'], 'Boss')
-
-class TestStats2(unittest.TestCase):
+ stats.get_spec_defaults(self.items), {
+ 'test_int1' : 12345 ,
+ 'test_real1' : 12345.6789 ,
+ 'test_bool1' : True ,
+ 'test_str1' : 'ABCD' ,
+ 'test_list1' : [1,2,3] ,
+ 'test_map1' : {'a':1,'b':2,'c':3},
+ 'test_int2' : 0 ,
+ 'test_real2' : 0.0,
+ 'test_bool2' : False,
+ 'test_str2' : "",
+ 'test_list2' : [0],
+ 'test_map2' : { 'A' : 0, 'B' : 0, 'C' : 0 },
+ 'test_none' : None,
+ 'test_list3' : [ "one", "two", "three" ],
+ 'test_map3' : { 'a' : 'one', 'b' : 'two', 'c' : 'three' } })
+ self.assertEqual(stats.get_spec_defaults(None), {})
+ self.assertRaises(KeyError, stats.get_spec_defaults, [{'item_name':'Foo'}])
+
+ def test_get_timestamp(self):
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+
+ def test_get_datetime(self):
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertNotEqual(stats.get_datetime(
+ (2011, 6, 22, 8, 23, 40, 2, 173, 0)), self.const_datetime)
+
+class TestCallback(unittest.TestCase):
+ def setUp(self):
+ self.dummy_func = lambda *x, **y : (x, y)
+ self.dummy_args = (1,2,3)
+ self.dummy_kwargs = {'a':1,'b':2,'c':3}
+ self.cback1 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback2 = stats.Callback(
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback3 = stats.Callback(
+ command=self.dummy_func,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback4 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args
+ )
+
+ def test_init(self):
+ self.assertEqual((self.cback1.command, self.cback1.args, self.cback1.kwargs),
+ (self.dummy_func, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback2.command, self.cback2.args, self.cback2.kwargs),
+ (None, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback3.command, self.cback3.args, self.cback3.kwargs),
+ (self.dummy_func, (), self.dummy_kwargs))
+ self.assertEqual((self.cback4.command, self.cback4.args, self.cback4.kwargs),
+ (self.dummy_func, self.dummy_args, {}))
+
+ def test_call(self):
+ self.assertEqual(self.cback1(), (self.dummy_args, self.dummy_kwargs))
+ self.assertEqual(self.cback1(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback1(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+ self.assertEqual(self.cback2(), None)
+ self.assertEqual(self.cback3(), ((), self.dummy_kwargs))
+ self.assertEqual(self.cback3(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback3(a=100, b=200), ((), {'a':100, 'b':200}))
+ self.assertEqual(self.cback4(), (self.dummy_args, {}))
+ self.assertEqual(self.cback4(100, 200), ((100, 200), {}))
+ self.assertEqual(self.cback4(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
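The assertions above pin down the Callback interface: a stored command plus default args and kwargs, where arguments supplied at call time replace the stored defaults. A minimal sketch consistent with those assertions (an illustration only, not necessarily the exact stats.Callback implementation):

    class Callback:
        """Bundle a callable with default positional and keyword arguments."""
        def __init__(self, command=None, args=None, kwargs=None):
            self.command = command
            self.args = args if args is not None else ()
            self.kwargs = kwargs if kwargs is not None else {}

        def __call__(self, *args, **kwargs):
            # arguments given at call time replace the stored defaults
            if not args:
                args = self.args
            if not kwargs:
                kwargs = self.kwargs
            if self.command is not None:
                return self.command(*args, **kwargs)

    # usage mirroring the tests above
    cb = Callback(command=lambda *a, **k: (a, k), args=(1, 2), kwargs={'x': 3})
    assert cb() == ((1, 2), {'x': 3})
    assert cb(9) == ((9,), {'x': 3})
    assert cb(y=4) == ((1, 2), {'y': 4})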
+class TestStats(unittest.TestCase):
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session)
- self.listener = CCSessionListener(self.subject)
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
+ # set the signal handler to detect deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats = stats.Stats()
+ self.const_timestamp = 1308730448.965706
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ self.const_default_datetime = '1970-01-01T00:00:00Z'
def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
+
+ def test_init(self):
+ self.assertEqual(self.stats.module_name, 'Stats')
+ self.assertFalse(self.stats.running)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_status' in self.stats.callbacks)
+ self.assertTrue('command_shutdown' in self.stats.callbacks)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_showschema' in self.stats.callbacks)
+ self.assertTrue('command_set' in self.stats.callbacks)
+
+ def test_init_undefcmd(self):
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "_undef_command_",
+ "command_description": "a undefined command in stats",
+ "command_args": []
+ }
+ ],
+ "statistics": []
+ }
+}
+"""
+ orig_spec_location = stats.SPECFILE_LOCATION
+ stats.SPECFILE_LOCATION = io.StringIO(spec_str)
+ self.assertRaises(stats.StatsError, stats.Stats)
+ stats.SPECFILE_LOCATION = orig_spec_location
+
+ def test_start(self):
+ # start without err
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.assertFalse(self.stats.running)
+ self.stats_server.run()
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None))
+ self.assertFalse(self.stats.running)
+ self.stats_server.shutdown()
+
+ # start with err
+ self.stats = stats.Stats()
+ self.stats.update_statistics_data = lambda x,**y: ['an error']
+ self.assertRaises(stats.StatsError, self.stats.start)
+
+ def test_handlers(self):
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ # config_handler
+ self.assertEqual(self.stats.config_handler({'foo':'bar'}),
+ isc.config.create_answer(0))
+
+ # command_handler
+ self.base.boss.server._started.wait()
+ self.base.boss.server._started.clear()
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command(
+ 'set', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'data' : { 'boot_time' : self.const_datetime } }),
+ (0, None))
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command('status', 'Stats'),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ (rcode, value) = send_command('show', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ self.assertTrue('boot_time' in value['Boss'])
+ self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertTrue('report_time' in value['Stats'])
+ self.assertTrue('boot_time' in value['Stats'])
+ self.assertTrue('last_update_time' in value['Stats'])
+ self.assertTrue('timestamp' in value['Stats'])
+ self.assertTrue('lname' in value['Stats'])
+ (rcode, value) = send_command('showschema', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ for item in value['Boss']:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+ for item in value['Stats']:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
- def test_specfile(self):
+ self.assertEqual(
+ send_command('__UNKNOWN__', 'Stats'),
+ (1, "Unknown command: '__UNKNOWN__'"))
+
+ self.stats_server.shutdown()
+
+ def test_update_modules(self):
+ self.assertEqual(len(self.stats.modules), 0)
+ self.stats.update_modules()
+ self.assertTrue('Stats' in self.stats.modules)
+ self.assertTrue('Boss' in self.stats.modules)
+ self.assertFalse('Dummy' in self.stats.modules)
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertEqual(my_statistics_data['report_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['timestamp'], 0.0)
+ self.assertEqual(my_statistics_data['lname'], "")
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ orig_parse_answer = stats.isc.config.ccsession.parse_answer
+ stats.isc.config.ccsession.parse_answer = lambda x: (99, 'error')
+ self.assertRaises(stats.StatsError, self.stats.update_modules)
+ stats.isc.config.ccsession.parse_answer = orig_parse_answer
+
+ def test_get_statistics_data(self):
+ my_statistics_data = self.stats.get_statistics_data()
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('Boss' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('boot_time' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
+ self.assertEqual(my_statistics_data, 0.0)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
+ self.assertEqual(my_statistics_data, '')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Stats', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Foo', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ name='Bar')
+
+ def test_update_statistics_data(self):
+ self.stats.update_statistics_data(owner='Stats', lname='foo@bar')
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['lname'], 'foo@bar')
+ self.stats.update_statistics_data(owner='Stats', last_update_time=self.const_datetime)
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_datetime)
+ self.assertEqual(self.stats.update_statistics_data(owner='Stats', lname=0.0),
+ ['0.0 should be a string'])
+ self.assertEqual(self.stats.update_statistics_data(owner='Dummy', foo='bar'),
+ ['unknown module name: Dummy'])
+
+ def test_commands(self):
+ # status
+ self.assertEqual(self.stats.command_status(),
+ isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ # shutdown
+ self.stats.running = True
+ self.assertEqual(self.stats.command_shutdown(),
+ isc.config.create_answer(0))
+ self.assertFalse(self.stats.running)
+
+ def test_command_show(self):
+ self.assertEqual(self.stats.command_show(owner='Foo', name=None),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_show(owner='Auth'),
+ isc.config.create_answer(
+ 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
+ isc.config.create_answer(
+ 0, 0))
+ orig_get_timestamp = stats.get_timestamp
+ orig_get_datetime = stats.get_datetime
+ stats.get_timestamp = lambda : self.const_timestamp
+ stats.get_datetime = lambda : self.const_datetime
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
+ isc.config.create_answer(0, self.const_datetime))
+ self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
+ self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
+ stats.get_timestamp = orig_get_timestamp
+ stats.get_datetime = orig_get_datetime
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertRaises(
+ stats.StatsError, self.stats.command_show, owner='Foo', name='bar')
+
+ def test_command_showchema(self):
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema())
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Auth' in value)
+ self.assertFalse('__Dummy__' in value)
+ schema = value['Stats']
+ self.assertEqual(len(schema), 5)
+ for item in schema:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ schema = value['Boss']
+ self.assertEqual(len(schema), 1)
+ for item in schema:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+
+ schema = value['Auth']
+ self.assertEqual(len(schema), 2)
+ for item in schema:
+ self.assertTrue(len(item) == 6)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ for item in value:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats', name='report_time'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ self.assertTrue(len(value) == 7)
+ self.assertTrue('item_name' in value)
+ self.assertTrue('item_type' in value)
+ self.assertTrue('item_optional' in value)
+ self.assertTrue('item_default' in value)
+ self.assertTrue('item_title' in value)
+ self.assertTrue('item_description' in value)
+ self.assertTrue('item_format' in value)
+ self.assertEqual(value['item_name'], 'report_time')
+ self.assertEqual(value['item_format'], 'date-time')
+
+ self.assertEqual(self.stats.command_showschema(owner='Foo'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_showschema(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_showschema(owner='Auth'),
+ isc.config.create_answer(
+ 0, [{
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ },
+ {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially",
+ "item_name": "queries.udp",
+ "item_optional": False,
+ "item_title": "Queries UDP",
+ "item_type": "integer"
+ }]))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
+ isc.config.create_answer(
+ 0, {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ }))
+
+ self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Stats, name: bar"))
+ self.assertEqual(self.stats.command_showschema(name='bar'),
+ isc.config.create_answer(
+ 1, "module name is not specified"))
+
+ def test_command_set(self):
+ orig_get_datetime = stats.get_datetime
+ stats.get_datetime = lambda : self.const_datetime
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_set(owner='Boss',
+ data={ 'boot_time' : self.const_datetime }))
+ stats.get_datetime = orig_get_datetime
+ self.assertEqual(rcode, 0)
+ self.assertTrue(value is None)
+ self.assertEqual(self.stats.statistics_data['Boss']['boot_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.statistics_data['Stats']['last_update_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : 'foo@bar' }),
+ isc.config.create_answer(0, None))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: unknown item lname"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: No statistics specification"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [
+ {
+ "item_name": "dummy",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "brabra"
+ } ] } )
+ self.assertRaises(stats.StatsError,
+ self.stats.command_set, owner='Stats', data={ 'dummy' : '_xxxx_yyyy_zzz_' })
+
+class TestOSEnv(unittest.TestCase):
+ def test_osenv(self):
"""
- Test for specfile
-
+ test for the environ variable "B10_FROM_SOURCE"
+ "B10_FROM_SOURCE" is set in Makefile
"""
- if "B10_FROM_SOURCE" in os.environ:
- self.assertEqual(stats.SPECFILE_LOCATION,
+ # test case having B10_FROM_SOURCE
+ self.assertTrue("B10_FROM_SOURCE" in os.environ)
+ self.assertEqual(stats.SPECFILE_LOCATION, \
os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats.spec")
- self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
- os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats-schema.spec")
+ "src" + os.sep + "bin" + os.sep + "stats" + \
+ os.sep + "stats.spec")
+ # test case not having B10_FROM_SOURCE
+ path = os.environ["B10_FROM_SOURCE"]
+ os.environ.pop("B10_FROM_SOURCE")
+ self.assertFalse("B10_FROM_SOURCE" in os.environ)
+ # import stats again
+ imp.reload(stats)
+ # revert the changes
+ os.environ["B10_FROM_SOURCE"] = path
imp.reload(stats)
- # change path of SPECFILE_LOCATION
- stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
- self.subject = stats.SessionSubject(session=self.session)
- self.session = self.subject.session
- self.listener = stats.CCSessionListener(self.subject)
-
- self.assertEqual(self.listener.stats_spec, [])
- self.assertEqual(self.listener.stats_data, {})
-
- self.assertEqual(self.listener.commands_spec, [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }])
-
- def test_func_initialize_data(self):
- """
- Test for initialize_data function
-
- """
- # prepare for sample data set
- stats_spec = [
- {
- "item_name": "none_sample",
- "item_type": "null",
- "item_default": "None"
- },
- {
- "item_name": "boolean_sample",
- "item_type": "boolean",
- "item_default": True
- },
- {
- "item_name": "string_sample",
- "item_type": "string",
- "item_default": "A something"
- },
- {
- "item_name": "int_sample",
- "item_type": "integer",
- "item_default": 9999999
- },
- {
- "item_name": "real_sample",
- "item_type": "real",
- "item_default": 0.0009
- },
- {
- "item_name": "list_sample",
- "item_type": "list",
- "item_default": [0, 1, 2, 3, 4],
- "list_item_spec": []
- },
- {
- "item_name": "map_sample",
- "item_type": "map",
- "item_default": {'name':'value'},
- "map_item_spec": []
- },
- {
- "item_name": "other_sample",
- "item_type": "__unknown__",
- "item_default": "__unknown__"
- }
- ]
- # data for comparison
- stats_data = {
- 'none_sample': None,
- 'boolean_sample': True,
- 'string_sample': 'A something',
- 'int_sample': 9999999,
- 'real_sample': 0.0009,
- 'list_sample': [0, 1, 2, 3, 4],
- 'map_sample': {'name':'value'},
- 'other_sample': '__unknown__'
- }
- self.assertEqual(self.listener.initialize_data(stats_spec), stats_data)
-
- def test_func_main(self):
- # explicitly make failed
- self.session.close()
- stats.main(session=self.session)
- def test_osenv(self):
- """
- test for not having environ "B10_FROM_SOURCE"
- """
- if "B10_FROM_SOURCE" in os.environ:
- path = os.environ["B10_FROM_SOURCE"]
- os.environ.pop("B10_FROM_SOURCE")
- imp.reload(stats)
- os.environ["B10_FROM_SOURCE"] = path
- imp.reload(stats)
-
-def result_ok(*args):
- if args:
- return { 'result': list(args) }
- else:
- return { 'result': [ 0 ] }
+def test_main():
+ unittest.main()
if __name__ == "__main__":
- unittest.main()
+ test_main()
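
(For reference only; the sketch below is not part of this commit.) The new
TestOSEnv case relies on stats.SPECFILE_LOCATION being computed at import
time, so the module has to be re-imported with imp.reload() whenever
B10_FROM_SOURCE changes. A self-contained illustration of that pattern,
using made-up names (envmod, DEMO_FROM_SOURCE):

    import imp, os, sys, tempfile

    # create a throwaway module whose constant depends on an environment variable
    tmpdir = tempfile.mkdtemp()
    with open(os.path.join(tmpdir, "envmod.py"), "w") as f:
        f.write('import os\n'
                'LOCATION = os.environ.get("DEMO_FROM_SOURCE", "/installed/default")\n')
    sys.path.insert(0, tmpdir)

    os.environ["DEMO_FROM_SOURCE"] = "/work/src"
    import envmod
    assert envmod.LOCATION == "/work/src"

    # popping the variable alone changes nothing; only a reload recomputes it
    os.environ.pop("DEMO_FROM_SOURCE")
    assert envmod.LOCATION == "/work/src"
    imp.reload(envmod)
    assert envmod.LOCATION == "/installed/default"

    # restore and reload again, as the test does with B10_FROM_SOURCE
    os.environ["DEMO_FROM_SOURCE"] = "/work/src"
    imp.reload(envmod)
    assert envmod.LOCATION == "/work/src"
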
diff --git a/src/bin/stats/tests/fake_select.py b/src/bin/stats/tests/fake_select.py
deleted file mode 100644
index ca0ca82..0000000
--- a/src/bin/stats/tests/fake_select.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of select
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-import errno
-
-class error(Exception):
- pass
-
-def select(rlst, wlst, xlst, timeout):
- if type(timeout) != int and type(timeout) != float:
- raise TypeError("Error: %s must be integer or float"
- % timeout.__class__.__name__)
- for s in rlst + wlst + xlst:
- if type(s) != fake_socket.socket:
- raise TypeError("Error: %s must be a dummy socket"
- % s.__class__.__name__)
- s._called = s._called + 1
- if s._called > 3:
- raise error("Something is happened!")
- elif s._called > 2:
- raise error(errno.EINTR)
- return (rlst, wlst, xlst)
diff --git a/src/bin/stats/tests/fake_socket.py b/src/bin/stats/tests/fake_socket.py
deleted file mode 100644
index 4e3a458..0000000
--- a/src/bin/stats/tests/fake_socket.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of socket
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import re
-
-AF_INET = 'AF_INET'
-AF_INET6 = 'AF_INET6'
-_ADDRFAMILY = AF_INET
-has_ipv6 = True
-_CLOSED = False
-
-class gaierror(Exception):
- pass
-
-class error(Exception):
- pass
-
-class socket:
-
- def __init__(self, family=None):
- if family is None:
- self.address_family = _ADDRFAMILY
- else:
- self.address_family = family
- self._closed = _CLOSED
- if self._closed:
- raise error('socket is already closed!')
- self._called = 0
-
- def close(self):
- self._closed = True
-
- def fileno(self):
- return id(self)
-
- def bind(self, server_class):
- (self.server_address, self.server_port) = server_class
- if self.address_family not in set([AF_INET, AF_INET6]):
- raise error("Address family not supported by protocol: %s" % self.address_family)
- if self.address_family == AF_INET6 and not has_ipv6:
- raise error("Address family not supported in this machine: %s has_ipv6: %s"
- % (self.address_family, str(has_ipv6)))
- if self.address_family == AF_INET and re.search(':', self.server_address) is not None:
- raise gaierror("Address family for hostname not supported : %s %s" % (self.server_address, self.address_family))
- if self.address_family == AF_INET6 and re.search(':', self.server_address) is None:
- raise error("Cannot assign requested address : %s" % str(self.server_address))
- if type(self.server_port) is not int:
- raise TypeError("an integer is required: %s" % str(self.server_port))
- if self.server_port < 0 or self.server_port > 65535:
- raise OverflowError("port number must be 0-65535.: %s" % str(self.server_port))
diff --git a/src/bin/stats/tests/fake_time.py b/src/bin/stats/tests/fake_time.py
deleted file mode 100644
index 65e0237..0000000
--- a/src/bin/stats/tests/fake_time.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = "$Revision$"
-
-# This is a dummy time class against a Python standard time class.
-# It is just testing use only.
-# Other methods which time class has is not implemented.
-# (This class isn't orderloaded for time class.)
-
-# These variables are constant. These are example.
-_TEST_TIME_SECS = 1283364938.229088
-_TEST_TIME_STRF = '2010-09-01T18:15:38Z'
-
-def time():
- """
- This is a dummy time() method against time.time()
- """
- # return float constant value
- return _TEST_TIME_SECS
-
-def gmtime():
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- # always return nothing
- return None
-
-def strftime(*arg):
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- return _TEST_TIME_STRF
-
-
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
deleted file mode 100644
index 79263a9..0000000
--- a/src/bin/stats/tests/http/Makefile.am
+++ /dev/null
@@ -1,6 +0,0 @@
-EXTRA_DIST = __init__.py server.py
-CLEANFILES = __init__.pyc server.pyc
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/http/__init__.py b/src/bin/stats/tests/http/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/http/server.py b/src/bin/stats/tests/http/server.py
deleted file mode 100644
index 70ed6fa..0000000
--- a/src/bin/stats/tests/http/server.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of http.server
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-
-class DummyHttpResponse:
- def __init__(self, path):
- self.path = path
- self.headers={}
- self.log = ""
-
- def _write_log(self, msg):
- self.log = self.log + msg
-
-class HTTPServer:
- """
- A mock-up class of http.server.HTTPServer
- """
- address_family = fake_socket.AF_INET
- def __init__(self, server_class, handler_class):
- self.socket = fake_socket.socket(self.address_family)
- self.server_class = server_class
- self.socket.bind(self.server_class)
- self._handler = handler_class(None, None, self)
-
- def handle_request(self):
- pass
-
- def server_close(self):
- self.socket.close()
-
-class BaseHTTPRequestHandler:
- """
- A mock-up class of http.server.BaseHTTPRequestHandler
- """
-
- def __init__(self, request, client_address, server):
- self.path = "/path/to"
- self.headers = {}
- self.server = server
- self.response = DummyHttpResponse(path=self.path)
- self.response.write = self._write
- self.wfile = self.response
-
- def send_response(self, code=0):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
-
- def send_header(self, key, value):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.headers[key] = value
-
- def end_headers(self):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.wrote_headers = True
-
- def send_error(self, code, message=None):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
- self.response.body = message
-
- def address_string(self):
- return 'dummyhost'
-
- def log_date_time_string(self):
- return '[DD/MM/YYYY HH:MI:SS]'
-
- def _write(self, obj):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.body = obj.decode()
-
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
deleted file mode 100644
index bdfa1eb..0000000
--- a/src/bin/stats/tests/isc/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-SUBDIRS = cc config util log log_messages
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/__init__.py b/src/bin/stats/tests/isc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
deleted file mode 100644
index 67323b5..0000000
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py session.py
-CLEANFILES = __init__.pyc session.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/__init__.py b/src/bin/stats/tests/isc/cc/__init__.py
deleted file mode 100644
index 9a3eaf6..0000000
--- a/src/bin/stats/tests/isc/cc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.cc.session import *
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
deleted file mode 100644
index e18a695..0000000
--- a/src/bin/stats/tests/isc/cc/session.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import sys
-import fake_socket
-
-# set a dummy lname
-_TEST_LNAME = '123abc at xxxx'
-
-class Queue():
- def __init__(self, msg=None, env={}):
- self.msg = msg
- self.env = env
-
- def dump(self):
- return { 'msg': self.msg, 'env': self.env }
-
-class SessionError(Exception):
- pass
-
-class SessionTimeout(Exception):
- pass
-
-class Session:
- def __init__(self, socket_file=None, verbose=False):
- self._lname = _TEST_LNAME
- self.message_queue = []
- self.old_message_queue = []
- try:
- self._socket = fake_socket.socket()
- except fake_socket.error as se:
- raise SessionError(se)
- self.verbose = verbose
-
- @property
- def lname(self):
- return self._lname
-
- def close(self):
- self._socket.close()
-
- def _clear_queues(self):
- while len(self.message_queue) > 0:
- self.dequeue()
-
- def _next_sequence(self, que=None):
- return len(self.message_queue)
-
- def enqueue(self, msg=None, env={}):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- seq = self._next_sequence()
- env.update({"seq": 0}) # fixed here
- que = Queue(msg=msg, env=env)
- self.message_queue.append(que)
- if self.verbose:
- sys.stdout.write("[Session] enqueue: " + str(que.dump()) + "\n")
- return seq
-
- def dequeue(self):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = None
- try:
- que = self.message_queue.pop(0) # always pop at index 0
- self.old_message_queue.append(que)
- except IndexError:
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] dequeue: " + str(que.dump()) + "\n")
- return que
-
- def get_queue(self, seq=None):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- if seq is None:
- seq = len(self.message_queue) - 1
- que = None
- try:
- que = self.message_queue[seq]
- except IndexError:
- raise IndexError
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] get_queue: " + str(que.dump()) + "\n")
- return que
-
- def group_sendmsg(self, msg, group, instance="*", to="*"):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": to,
- "group": group,
- "instance": instance })
-
- def group_recvmsg(self, nonblock=True, seq=0):
- que = self.dequeue()
- if que.msg != None:
- cmd = que.msg.get("command")
- if cmd and cmd[0] == 'getstats':
- # Create answer for command 'getstats'
- retdata = { "stats_data": {
- 'bind10.boot_time' : "1970-01-01T00:00:00Z"
- }}
- return {'result': [0, retdata]}, que.env
- return que.msg, que.env
-
- def group_reply(self, routing, msg):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": routing["from"],
- "group": routing["group"],
- "instance": routing["instance"],
- "reply": routing["seq"] })
-
- def get_message(self, group, to='*'):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = Queue()
- for q in self.message_queue:
- if q.env['group'] == group:
- self.message_queue.remove(q)
- self.old_message_queue.append(q)
- que = q
- if self.verbose:
- sys.stdout.write("[Session] get_message: " + str(que.dump()) + "\n")
- return q.msg
-
- def group_subscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
-
- def group_unsubscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
deleted file mode 100644
index ffbecda..0000000
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py ccsession.py
-CLEANFILES = __init__.pyc ccsession.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/__init__.py b/src/bin/stats/tests/isc/config/__init__.py
deleted file mode 100644
index 4c49e95..0000000
--- a/src/bin/stats/tests/isc/config/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.config.ccsession import *
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
deleted file mode 100644
index 50f7c1b..0000000
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import json
-import os
-import time
-from isc.cc.session import Session
-
-COMMAND_CONFIG_UPDATE = "config_update"
-
-def parse_answer(msg):
- assert 'result' in msg
- try:
- return msg['result'][0], msg['result'][1]
- except IndexError:
- return msg['result'][0], None
-
-def create_answer(rcode, arg = None):
- if arg is None:
- return { 'result': [ rcode ] }
- else:
- return { 'result': [ rcode, arg ] }
-
-def parse_command(msg):
- assert 'command' in msg
- try:
- return msg['command'][0], msg['command'][1]
- except IndexError:
- return msg['command'][0], None
-
-def create_command(command_name, params = None):
- if params is None:
- return {"command": [command_name]}
- else:
- return {"command": [command_name, params]}
-
-def module_spec_from_file(spec_file, check = True):
- try:
- file = open(spec_file)
- json_str = file.read()
- module_spec = json.loads(json_str)
- file.close()
- return ModuleSpec(module_spec['module_spec'], check)
- except IOError as ioe:
- raise ModuleSpecError("JSON read error: " + str(ioe))
- except ValueError as ve:
- raise ModuleSpecError("JSON parse error: " + str(ve))
- except KeyError as err:
- raise ModuleSpecError("Data definition has no module_spec element")
-
-class ModuleSpecError(Exception):
- pass
-
-class ModuleSpec:
- def __init__(self, module_spec, check = True):
- # check only confi_data for testing
- if check and "config_data" in module_spec:
- _check_config_spec(module_spec["config_data"])
- self._module_spec = module_spec
-
- def get_config_spec(self):
- return self._module_spec['config_data']
-
- def get_commands_spec(self):
- return self._module_spec['commands']
-
- def get_module_name(self):
- return self._module_spec['module_name']
-
-def _check_config_spec(config_data):
- # config data is a list of items represented by dicts that contain
- # things like "item_name", depending on the type they can have
- # specific subitems
- """Checks a list that contains the configuration part of the
- specification. Raises a ModuleSpecError if there is a
- problem."""
- if type(config_data) != list:
- raise ModuleSpecError("config_data is of type " + str(type(config_data)) + ", not a list of items")
- for config_item in config_data:
- _check_item_spec(config_item)
-
-def _check_item_spec(config_item):
- """Checks the dict that defines one config item
- (i.e. containing "item_name", "item_type", etc.
- Raises a ModuleSpecError if there is an error"""
- if type(config_item) != dict:
- raise ModuleSpecError("item spec not a dict")
- if "item_name" not in config_item:
- raise ModuleSpecError("no item_name in config item")
- if type(config_item["item_name"]) != str:
- raise ModuleSpecError("item_name is not a string: " + str(config_item["item_name"]))
- item_name = config_item["item_name"]
- if "item_type" not in config_item:
- raise ModuleSpecError("no item_type in config item")
- item_type = config_item["item_type"]
- if type(item_type) != str:
- raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
- if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
- raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
- if "item_optional" in config_item:
- if type(config_item["item_optional"]) != bool:
- raise ModuleSpecError("item_default in " + item_name + " is not a boolean")
- if not config_item["item_optional"] and "item_default" not in config_item:
- raise ModuleSpecError("no default value for non-optional item " + item_name)
- else:
- raise ModuleSpecError("item_optional not in item " + item_name)
- if "item_default" in config_item:
- item_default = config_item["item_default"]
- if (item_type == "integer" and type(item_default) != int) or \
- (item_type == "real" and type(item_default) != float) or \
- (item_type == "boolean" and type(item_default) != bool) or \
- (item_type == "string" and type(item_default) != str) or \
- (item_type == "list" and type(item_default) != list) or \
- (item_type == "map" and type(item_default) != dict):
- raise ModuleSpecError("Wrong type for item_default in " + item_name)
- # TODO: once we have check_type, run the item default through that with the list|map_item_spec
- if item_type == "list":
- if "list_item_spec" not in config_item:
- raise ModuleSpecError("no list_item_spec in list item " + item_name)
- if type(config_item["list_item_spec"]) != dict:
- raise ModuleSpecError("list_item_spec in " + item_name + " is not a dict")
- _check_item_spec(config_item["list_item_spec"])
- if item_type == "map":
- if "map_item_spec" not in config_item:
- raise ModuleSpecError("no map_item_sepc in map item " + item_name)
- if type(config_item["map_item_spec"]) != list:
- raise ModuleSpecError("map_item_spec in " + item_name + " is not a list")
- for map_item in config_item["map_item_spec"]:
- if type(map_item) != dict:
- raise ModuleSpecError("map_item_spec element is not a dict")
- _check_item_spec(map_item)
- if 'item_format' in config_item and 'item_default' in config_item:
- item_format = config_item["item_format"]
- item_default = config_item["item_default"]
- if not _check_format(item_default, item_format):
- raise ModuleSpecError(
- "Wrong format for " + str(item_default) + " in " + str(item_name))
-
-def _check_format(value, format_name):
- """Check if specified value and format are correct. Return True if
- is is correct."""
- # TODO: should be added other format types if necessary
- time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
- 'date' : "%Y-%m-%d",
- 'time' : "%H:%M:%S" }
- for fmt in time_formats:
- if format_name == fmt:
- try:
- time.strptime(value, time_formats[fmt])
- return True
- except (ValueError, TypeError):
- break
- return False
-
-class ModuleCCSessionError(Exception):
- pass
-
-class DataNotFoundError(Exception):
- pass
-
-class ConfigData:
- def __init__(self, specification):
- self.specification = specification
-
- def get_value(self, identifier):
- """Returns a tuple where the first item is the value at the
- given identifier, and the second item is absolutely False
- even if the value is an unset default or not. Raises an
- DataNotFoundError if the identifier is not found in the
- specification file.
- *** NOTE ***
- There are some differences from the original method. This
- method never handles local settings like the original
- method. But these different behaviors aren't so big issues
- for a mock-up method of stats_httpd because stats_httpd
- calls this method at only first."""
- for config_map in self.get_module_spec().get_config_spec():
- if config_map['item_name'] == identifier:
- if 'item_default' in config_map:
- return config_map['item_default'], False
- raise DataNotFoundError("item_name %s is not found in the specfile" % identifier)
-
- def get_module_spec(self):
- return self.specification
-
-class ModuleCCSession(ConfigData):
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
- module_spec = module_spec_from_file(spec_file_name)
- ConfigData.__init__(self, module_spec)
- self._module_name = module_spec.get_module_name()
- self.set_config_handler(config_handler)
- self.set_command_handler(command_handler)
- if not cc_session:
- self._session = Session(verbose=True)
- else:
- self._session = cc_session
-
- def start(self):
- pass
-
- def close(self):
- self._session.close()
-
- def check_command(self, nonblock=True):
- msg, env = self._session.group_recvmsg(nonblock)
- if not msg or 'result' in msg:
- return
- cmd, arg = parse_command(msg)
- answer = None
- if cmd == COMMAND_CONFIG_UPDATE and self._config_handler:
- answer = self._config_handler(arg)
- elif env['group'] == self._module_name and self._command_handler:
- answer = self._command_handler(cmd, arg)
- if answer:
- self._session.group_reply(env, answer)
-
- def set_config_handler(self, config_handler):
- self._config_handler = config_handler
- # should we run this right now since we've changed the handler?
-
- def set_command_handler(self, command_handler):
- self._command_handler = command_handler
-
- def get_module_spec(self):
- return self.specification
-
- def get_socket(self):
- return self._session._socket
-
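
(Aside, not part of this commit.) The answer format this deleted mock mirrors
from isc.config.ccsession -- and which the new tests above still exercise via
isc.config.create_answer() -- is simply {'result': [rcode]} or
{'result': [rcode, arg]}. A self-contained round trip, with the two helpers
copied from the mock shown above:

    def create_answer(rcode, arg=None):
        # no argument: {'result': [rcode]}; otherwise {'result': [rcode, arg]}
        return {'result': [rcode]} if arg is None else {'result': [rcode, arg]}

    def parse_answer(msg):
        # returns (rcode, arg); arg defaults to None when absent
        assert 'result' in msg
        try:
            return msg['result'][0], msg['result'][1]
        except IndexError:
            return msg['result'][0], None

    assert parse_answer(create_answer(0)) == (0, None)
    assert parse_answer(create_answer(1, "unknown command")) == (1, "unknown command")
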
diff --git a/src/bin/stats/tests/isc/log/Makefile.am b/src/bin/stats/tests/isc/log/Makefile.am
deleted file mode 100644
index 457b9de..0000000
--- a/src/bin/stats/tests/isc/log/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log/__init__.py b/src/bin/stats/tests/isc/log/__init__.py
deleted file mode 100644
index 641cf79..0000000
--- a/src/bin/stats/tests/isc/log/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# This file is not installed. The log.so is installed into the right place.
-# It is only to find it in the .libs directory when we run as a test or
-# from the build directory.
-# But as nobody gives us the builddir explicitly (and we can't use generation
-# from .in file, as it would put us into the builddir and we wouldn't be found)
-# we guess from current directory. Any idea for something better? This should
-# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
-# Should we look there? Or define something in bind10_config?
-
-import os
-import sys
-
-for base in sys.path[:]:
- loglibdir = os.path.join(base, 'isc/log/.libs')
- if os.path.exists(loglibdir):
- sys.path.insert(0, loglibdir)
-
-from log import *
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
deleted file mode 100644
index 9c74354..0000000
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py process.py
-CLEANFILES = __init__.pyc process.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/util/__init__.py b/src/bin/stats/tests/isc/util/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/util/process.py b/src/bin/stats/tests/isc/util/process.py
deleted file mode 100644
index 0f764c1..0000000
--- a/src/bin/stats/tests/isc/util/process.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A dummy function of isc.util.process.rename()
-"""
-
-def rename(name=None):
- pass
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
new file mode 100644
index 0000000..da0bac4
--- /dev/null
+++ b/src/bin/stats/tests/test_utils.py
@@ -0,0 +1,364 @@
+"""
+Utilities and mock modules for unittests of statistics modules
+
+"""
+import os
+import io
+import time
+import sys
+import threading
+import tempfile
+import json
+import signal
+
+import msgq
+import isc.config.cfgmgr
+import stats
+import stats_httpd
+
+# Change value of BIND10_MSGQ_SOCKET_FILE in environment variables
+if 'BIND10_MSGQ_SOCKET_FILE' not in os.environ:
+ os.environ['BIND10_MSGQ_SOCKET_FILE'] = tempfile.mktemp(prefix='msgq_socket_')
+
+class SignalHandler():
+ """A signal handler class for deadlock in unittest"""
+ def __init__(self, fail_handler, timeout=20):
+ """sets a schedule in SIGARM for invoking the handler via
+ unittest.TestCase after timeout seconds (default is 20)"""
+ self.fail_handler = fail_handler
+ self.orig_handler = signal.signal(signal.SIGALRM, self.sig_handler)
+ signal.alarm(timeout)
+
+ def reset(self):
+ """resets the schedule in SIGALRM"""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.orig_handler)
+
+ def sig_handler(self, signal, frame):
+ """envokes unittest.TestCase.fail as a signal handler"""
+ self.fail_handler("A deadlock might be detected")
+
+def send_command(command_name, module_name, params=None, session=None, nonblock=False, timeout=None):
+ if session is not None:
+ cc_session = session
+ else:
+ cc_session = isc.cc.Session()
+ if timeout is not None:
+ orig_timeout = cc_session.get_timeout()
+ cc_session.set_timeout(timeout * 1000)
+ command = isc.config.ccsession.create_command(command_name, params)
+ seq = cc_session.group_sendmsg(command, module_name)
+ try:
+ (answer, env) = cc_session.group_recvmsg(nonblock, seq)
+ if answer:
+ return isc.config.ccsession.parse_answer(answer)
+ except isc.cc.SessionTimeout:
+ pass
+ finally:
+ if timeout is not None:
+ cc_session.set_timeout(orig_timeout)
+ if session is None:
+ cc_session.close()
+
+def send_shutdown(module_name, **kwargs):
+ return send_command("shutdown", module_name, **kwargs)
+
+class ThreadingServerManager:
+ def __init__(self, server, *args, **kwargs):
+ self.server = server(*args, **kwargs)
+ self.server_name = server.__name__
+ self.server._thread = threading.Thread(
+ name=self.server_name, target=self.server.run)
+ self.server._thread.daemon = True
+
+ def run(self):
+ self.server._thread.start()
+ self.server._started.wait()
+ self.server._started.clear()
+
+ def shutdown(self):
+ self.server.shutdown()
+ self.server._thread.join(0) # timeout is 0
+
+def do_nothing(*args, **kwargs): pass
+
+class dummy_sys:
+ """Dummy for sys"""
+ class dummy_io:
+ write = do_nothing
+ stdout = stderr = dummy_io()
+
+class MockMsgq:
+ def __init__(self):
+ self._started = threading.Event()
+ # suppress output to stdout and stderr
+ msgq.sys = dummy_sys()
+ msgq.print = do_nothing
+ self.msgq = msgq.MsgQ(verbose=False)
+ result = self.msgq.setup()
+ if result:
+ sys.exit("Error on Msgq startup: %s" % result)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.msgq.run()
+ except Exception:
+ pass
+ finally:
+ # explicitly shut down the socket of the msgq before
+ # shutting down the msgq
+ self.msgq.listen_socket.shutdown(msgq.socket.SHUT_RDWR)
+ self.msgq.shutdown()
+
+ def shutdown(self):
+ # do nothing, to avoid shutting down the msgq twice
+ pass
+
+class MockCfgmgr:
+ def __init__(self):
+ self._started = threading.Event()
+ self.cfgmgr = isc.config.cfgmgr.ConfigManager(
+ os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
+ self.cfgmgr.read_config()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.cfgmgr.run()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.cfgmgr.running = False
+
+class MockBoss:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Boss",
+ "module_description": "Mock Master process",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+"""
+ _BASETIME = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self._started.set()
+ self.got_command_name = command
+ params = { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
+ }
+ }
+ if command == 'sendstats':
+ send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(0)
+ elif command == 'getstats':
+ return isc.config.create_answer(0, params)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MockAuth:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Auth",
+ "module_description": "Mock Authoritative service",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
+ ]
+ }
+}
+"""
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+ self.queries_tcp = 3
+ self.queries_udp = 2
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self.got_command_name = command
+ if command == 'sendstats':
+ params = { "owner": "Auth",
+ "data": { 'queries.tcp': self.queries_tcp,
+ 'queries.udp': self.queries_udp } }
+ return send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MyStats(stats.Stats):
+ def __init__(self):
+ self._started = threading.Event()
+ stats.Stats.__init__(self)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_shutdown()
+
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+ ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
+ def __init__(self, *server_address):
+ self._started = threading.Event()
+ if server_address:
+ stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+ try:
+ stats_httpd.StatsHttpd.__init__(self)
+ finally:
+ if hasattr(stats_httpd.SPECFILE_LOCATION, "close"):
+ stats_httpd.SPECFILE_LOCATION.close()
+ stats_httpd.SPECFILE_LOCATION = self.ORIG_SPECFILE_LOCATION
+ else:
+ stats_httpd.StatsHttpd.__init__(self)
+
+ def create_specfile(self, *server_address):
+ spec_io = open(self.ORIG_SPECFILE_LOCATION)
+ try:
+ spec = json.load(spec_io)
+ spec_io.close()
+ config = spec['module_spec']['config_data']
+ for i in range(len(config)):
+ if config[i]['item_name'] == 'listen_on':
+ config[i]['item_default'] = \
+ [ dict(address=a[0], port=a[1]) for a in server_address ]
+ break
+ return io.StringIO(json.dumps(spec))
+ finally:
+ spec_io.close()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_handler('shutdown', None)
+
+class BaseModules:
+ def __init__(self):
+ # MockMsgq
+ self.msgq = ThreadingServerManager(MockMsgq)
+ self.msgq.run()
+ # Check whether msgq is ready. A SessionTimeout is raised here if not.
+ isc.cc.session.Session().close()
+ # MockCfgmgr
+ self.cfgmgr = ThreadingServerManager(MockCfgmgr)
+ self.cfgmgr.run()
+ # MockBoss
+ self.boss = ThreadingServerManager(MockBoss)
+ self.boss.run()
+ # MockAuth
+ self.auth = ThreadingServerManager(MockAuth)
+ self.auth.run()
+
+ def shutdown(self):
+ # MockAuth
+ self.auth.shutdown()
+ # MockBoss
+ self.boss.shutdown()
+ # MockCfgmgr
+ self.cfgmgr.shutdown()
+ # MockMsgq
+ self.msgq.shutdown()
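
(Illustration only; not part of this commit.) The helpers above are meant to
be combined roughly as follows in a test case. ExampleStatsTest is a made-up
name, and actually running it assumes the environment the real tests set up
(a msgq socket path, CONFIG_TESTDATA_PATH, and the BIND 10 Python modules on
the import path):

    import unittest
    from test_utils import (BaseModules, ThreadingServerManager, MyStats,
                            SignalHandler, send_command)

    class ExampleStatsTest(unittest.TestCase):
        def setUp(self):
            self.sig_handler = SignalHandler(self.fail)   # fail instead of deadlocking
            self.base = BaseModules()                     # mock msgq, cfgmgr, Boss, Auth
            self.stats_server = ThreadingServerManager(MyStats)
            self.stats_server.run()

        def tearDown(self):
            self.stats_server.shutdown()
            self.base.shutdown()
            self.sig_handler.reset()

        def test_boss_sendstats(self):
            # 'sendstats' is defined in the MockBoss spec above: it pushes
            # Boss/boot_time to the running Stats module and answers rcode 0
            (rcode, value) = send_command("sendstats", "Boss")
            self.assertEqual(rcode, 0)
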
diff --git a/src/bin/stats/tests/testdata/Makefile.am b/src/bin/stats/tests/testdata/Makefile.am
deleted file mode 100644
index 1b8df6d..0000000
--- a/src/bin/stats/tests/testdata/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-EXTRA_DIST = stats_test.spec
diff --git a/src/bin/stats/tests/testdata/stats_test.spec b/src/bin/stats/tests/testdata/stats_test.spec
deleted file mode 100644
index 8136756..0000000
--- a/src/bin/stats/tests/testdata/stats_test.spec
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 0ce992d..41b497f 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -14,7 +14,7 @@ endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index 54dbe7c..056103a 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -2,12 +2,12 @@
.\" Title: b10-xfrin
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: September 8, 2011
+.\" Date: October 12, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-XFRIN" "8" "September 8, 2011" "BIND10" "BIND10"
+.TH "B10\-XFRIN" "8" "October 12, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -29,23 +29,11 @@ The
\fBb10\-xfrin\fR
daemon provides the BIND 10 incoming DNS zone transfer service\&. Normally it is started by the
\fBbind10\fR(8)
-boss process\&. When triggered it can request and receive a zone transfer and store the zone in a BIND 10 zone data store\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-.sp
-This prototype release only supports AXFR\&. IXFR is not implemented\&.
-.sp .5v
-.RE
+boss process\&. When triggered it can request and receive a zone transfer and store the zone in a BIND 10 zone data source\&.
+.PP
+The
+\fBb10\-xfrin\fR
+daemon supports both AXFR and IXFR\&. Due to some implementation limitations of the current development release, however, it only tries AXFR by default, and care should be taken to enable IXFR\&. See the BIND 10 Guide for more details\&.
.PP
This daemon communicates with BIND 10 over a
\fBb10-msgq\fR(8)
@@ -77,7 +65,9 @@ daemon\&. The list items are:
\fImaster_addr\fR
(the zone master to transfer from),
\fImaster_port\fR
-(defaults to 53), and
+(defaults to 53),
+\fIuse_ixfr\fR
+(defaults to false), and
\fItsig_key\fR
(optional TSIG key to use)\&. The
\fItsig_key\fR
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index d45e15f..231681c 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>September 8, 2011</date>
+ <date>October 12, 2011</date>
</refentryinfo>
<refmeta>
@@ -59,7 +59,7 @@
<citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
boss process.
When triggered it can request and receive a zone transfer and store
- the zone in a BIND 10 zone data store.
+ the zone in a BIND 10 zone data source.
</para>
<!-- TODO:
@@ -68,9 +68,14 @@ The logic for handling transfer triggers or zone management is handled
in separate zonemgr process.
-->
- <note><simpara>
- This prototype release only supports AXFR. IXFR is not implemented.
- </simpara></note>
+ <para>
+ The <command>b10-xfrin</command> daemon supports both AXFR and
+ IXFR. Due to some implementation limitations of the current
+ development release, however, it only tries AXFR by default,
+ and care should be taken to enable IXFR.
+ See the BIND 10 Guide for more details.
+ </para>
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
<para>
This daemon communicates with BIND 10 over a
@@ -105,7 +110,8 @@ in separate zonemgr process.
<varname>name</varname> (the zone name),
<varname>class</varname> (defaults to <quote>IN</quote>),
<varname>master_addr</varname> (the zone master to transfer from),
- <varname>master_port</varname> (defaults to 53), and
+ <varname>master_port</varname> (defaults to 53),
+ <varname>use_ixfr</varname> (defaults to false), and
<varname>tsig_key</varname> (optional TSIG key to use).
The <varname>tsig_key</varname> is specified using a full string
colon-delimited name:key:algorithm representation (e.g.
@@ -153,7 +159,7 @@ in separate zonemgr process.
according to the SOA's REFRESH time
to tell <command>b10-xfrin</command> that the zone needs to do
a zone refresh.
- This is an internal command and not exposed to the administrator.
+ This is an internal command and not exposed to the administrator.
<!-- not defined in spec -->
</para>
@@ -203,7 +209,7 @@ add a usage example of xfrin -->
</para></note>
<!-- TODO:
- it can handle more than one XFR in now,
+ it can handle more than one XFR in now,
but the problem is If SQLITE3 datasource part support multiple write
operation
-->
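
(Illustration only; not part of this commit.) One entry of the b10-xfrin
"zones" configuration list described above, written out as a plain Python
dict; the values are hypothetical, and the defaults follow the text (class
"IN", master_port 53, use_ixfr false, tsig_key optional):

    zone_entry = {
        "name": "example.com.",        # the zone name
        "class": "IN",                 # defaults to "IN"
        "master_addr": "192.0.2.1",    # the zone master to transfer from
        "master_port": 53,             # defaults to 53
        "use_ixfr": False,             # defaults to false; set True to enable IXFR
        # "tsig_key": "name:key:algorithm",  # optional TSIG key, colon-delimited
    }
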
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 3d56009..8f4fa91 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = testdata .
+
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrin_test.py
EXTRA_DIST = $(PYTESTS)
@@ -7,6 +9,9 @@ EXTRA_DIST = $(PYTESTS)
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# sunstudio needs the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,5 +25,7 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
+ TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
+ TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/tests/testdata/Makefile.am b/src/bin/xfrin/tests/testdata/Makefile.am
new file mode 100644
index 0000000..5e325cb
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = example.com # not necessarily needed, but for reference
+EXTRA_DIST += example.com.sqlite3
diff --git a/src/bin/xfrin/tests/testdata/example.com b/src/bin/xfrin/tests/testdata/example.com
new file mode 100644
index 0000000..2afcd28
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/example.com
@@ -0,0 +1,17 @@
+;; This is the simplest form of zone file for 'example.com', which is the
+;; source of the corresponding sqlite3 DB file. This file is provided
+;; for reference purposes only; it's not actually used anywhere.
+
+example.com. 3600 IN SOA master.example.com. admin.example.com. (
+ 1230 ; serial
+ 3600 ; refresh (1 hour)
+ 1800 ; retry (30 minutes)
+ 2419200 ; expire (4 weeks)
+ 7200 ; minimum (2 hours)
+ )
+ 3600 NS dns01.example.com.
+ 3600 NS dns02.example.com.
+ 3600 NS dns03.example.com.
+dns01.example.com. 3600 IN A 192.0.2.1
+dns02.example.com. 3600 IN A 192.0.2.2
+dns03.example.com. 3600 IN A 192.0.2.3
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
new file mode 100644
index 0000000..ed241c3
Binary files /dev/null and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 05cce98..65bd968 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,10 +14,12 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
+import shutil
import socket
import io
from isc.testutils.tsigctx_mock import MockTSIGContext
from xfrin import *
+from isc.xfrin.diff import Diff
import isc.log
#
@@ -36,34 +38,150 @@ TEST_MASTER_IPV6_ADDRESS = '::1'
TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
socket.IPPROTO_TCP, '',
(TEST_MASTER_IPV6_ADDRESS, 53))
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
+TESTDATA_OBJDIR = os.getenv("TESTDATAOBJDIR")
# XXX: This should be a non priviledge port that is unlikely to be used.
# If some other process uses this port test will fail.
TEST_MASTER_PORT = '53535'
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+# SOA intended to be used for the new SOA as a result of transfer.
soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
'master.example.com. admin.example.com ' +
'1234 3600 1800 2419200 7200')
-soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
- RRTTL(3600))
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
soa_rrset.add_rdata(soa_rdata)
-example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())
-example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.SOA())
+
+# SOA intended to be used for the current SOA at the secondary side.
+# Note that its serial is smaller than that of soa_rdata.
+begin_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+ 'master.example.com. admin.example.com ' +
+ '1230 3600 1800 2419200 7200')
+begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+begin_soa_rrset.add_rdata(begin_soa_rdata)
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR())
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA())
default_questions = [example_axfr_question]
default_answers = [soa_rrset]
+def check_diffs(assert_fn, expected, actual):
+ '''A helper function checking the differences made in the XFR session.
+
+ This is expected to be called from some subclass of unittest.TestCase, and
+ assert_fn is generally expected to be 'self.assertEqual' of that class.
+
+ '''
+ assert_fn(len(expected), len(actual))
+ for (diffs_exp, diffs_actual) in zip(expected, actual):
+ assert_fn(len(diffs_exp), len(diffs_actual))
+ for (diff_exp, diff_actual) in zip(diffs_exp, diffs_actual):
+ # operation should match
+ assert_fn(diff_exp[0], diff_actual[0])
+ # The diff as RRset should be equal (for simplicity we assume
+ # all RRsets contain exactly one RDATA)
+ assert_fn(diff_exp[1].get_name(), diff_actual[1].get_name())
+ assert_fn(diff_exp[1].get_type(), diff_actual[1].get_type())
+ assert_fn(diff_exp[1].get_class(), diff_actual[1].get_class())
+ assert_fn(diff_exp[1].get_rdata_count(),
+ diff_actual[1].get_rdata_count())
+ assert_fn(1, diff_exp[1].get_rdata_count())
+ assert_fn(diff_exp[1].get_rdata()[0],
+ diff_actual[1].get_rdata()[0])
+
class XfrinTestException(Exception):
pass
+class XfrinTestTimeoutException(Exception):
+ pass
+
class MockCC():
def get_default_value(self, identifier):
+ # The returned values should be identical to the spec file
+ # XXX: these should be retrieved from the spec file
+ # (see MyCCSession of xfrout_test.py.in)
if identifier == "zones/master_port":
return TEST_MASTER_PORT
if identifier == "zones/class":
return TEST_RRCLASS_STR
+ if identifier == "zones/use_ixfr":
+ return False
+
+class MockDataSourceClient():
+ '''A simple mock data source client.
+
+ This class provides a minimal set of wrappers related the data source
+ API that would be used by Diff objects. For our testing purposes they
+ only keep truck of the history of the changes.
+
+ '''
+ def __init__(self):
+ self.force_fail = False # if True, raise an exception on commit
+ self.committed_diffs = []
+ self.diffs = []
+
+ def get_class(self):
+ '''Mock version of get_class().
+
+ We simply return the commonly used constant RR class. If and when
+ we use this mock for a different RR class we need to adjust it
+ accordingly.
+
+ '''
+ return TEST_RRCLASS
+
+ def find_zone(self, zone_name):
+ '''Mock version of find_zone().
+
+ It returns itself (subsequently acting as a mock ZoneFinder) for
+ some test zone names. For some others it returns either NOTFOUND
+ or PARTIALMATCH.
+
+ '''
+ if zone_name == TEST_ZONE_NAME or \
+ zone_name == Name('no-soa.example') or \
+ zone_name == Name('dup-soa.example'):
+ return (isc.datasrc.DataSourceClient.SUCCESS, self)
+ elif zone_name == Name('no-such-zone.example'):
+ return (DataSourceClient.NOTFOUND, None)
+ elif zone_name == Name('partial-match-zone.example'):
+ return (DataSourceClient.PARTIALMATCH, self)
+ raise ValueError('Unexpected input to mock client: bug in test case?')
+
+ def find(self, name, rrtype, target, options):
+ '''Mock ZoneFinder.find().
+
+ It returns the predefined SOA RRset to queries for SOA of the common
+ test zone name. It also emulates some unusual cases for special
+ zone names.
+
+ '''
+ if name == TEST_ZONE_NAME and rrtype == RRType.SOA():
+ return (ZoneFinder.SUCCESS, begin_soa_rrset)
+ if name == Name('no-soa.example'):
+ return (ZoneFinder.NXDOMAIN, None)
+ if name == Name('dup-soa.example'):
+ dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA(), RRTTL(0))
+ dup_soa_rrset.add_rdata(begin_soa_rdata)
+ dup_soa_rrset.add_rdata(soa_rdata)
+ return (ZoneFinder.SUCCESS, dup_soa_rrset)
+ raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+ def get_updater(self, zone_name, replace):
+ return self
+
+ def add_rrset(self, rrset):
+ self.diffs.append(('add', rrset))
+
+ def delete_rrset(self, rrset):
+ self.diffs.append(('delete', rrset))
+
+ def commit(self):
+ if self.force_fail:
+ raise isc.datasrc.Error('Updater.commit() failed')
+ self.committed_diffs.append(self.diffs)
+ self.diffs = []
class MockXfrin(Xfrin):
# This is a class attribute of a callable object that specifies a non
@@ -87,20 +205,21 @@ class MockXfrin(Xfrin):
MockXfrin.check_command_hook()
def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
- tsig_key, check_soa=True):
+ tsig_key, request_type, check_soa=True):
# store some of the arguments for verification, then call this
# method in the superclass
self.xfrin_started_master_addr = master_addrinfo[2][0]
self.xfrin_started_master_port = master_addrinfo[2][1]
- return Xfrin.xfrin_start(self, zone_name, rrclass, db_file,
+ self.xfrin_started_request_type = request_type
+ return Xfrin.xfrin_start(self, zone_name, rrclass, None,
master_addrinfo, tsig_key,
- check_soa)
+ request_type, check_soa)
class MockXfrinConnection(XfrinConnection):
- def __init__(self, sock_map, zone_name, rrclass, db_file, shutdown_event,
+ def __init__(self, sock_map, zone_name, rrclass, shutdown_event,
master_addr):
- super().__init__(sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addr)
+ super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
+ shutdown_event, master_addr)
self.query_data = b''
self.reply_data = b''
self.force_time_out = False
@@ -121,8 +240,11 @@ class MockXfrinConnection(XfrinConnection):
def recv(self, size):
data = self.reply_data[:size]
self.reply_data = self.reply_data[size:]
+ if len(data) == 0:
+ raise XfrinTestTimeoutException('Emulated timeout')
if len(data) < size:
- raise XfrinTestException('cannot get reply data')
+ raise XfrinTestException('cannot get reply data (' + str(size) +
+ ' bytes)')
return data
def send(self, data):
@@ -174,14 +296,296 @@ class MockXfrinConnection(XfrinConnection):
return reply_data
+class TestXfrinState(unittest.TestCase):
+ def setUp(self):
+ self.sock_map = {}
+ self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+ TEST_RRCLASS, threading.Event(),
+ TEST_MASTER_IPV4_ADDRINFO)
+ self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ RRTTL(3600))
+ self.begin_soa.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS,
+ 'm. r. 1230 0 0 0 0'))
+ self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ RRTTL(3600))
+ self.ns_rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS,
+ 'ns.example.com'))
+ self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A(),
+ RRTTL(3600))
+ self.a_rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, '192.0.2.1'))
+
+ self.conn._datasrc_client = MockDataSourceClient()
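+ # The state classes operate on the connection's _diff attribute, so
+ # give the connection a fresh Diff backed by the mock client.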
+ self.conn._diff = Diff(self.conn._datasrc_client, TEST_ZONE_NAME)
+
+class TestXfrinStateBase(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+
+ def test_handle_rr_on_base(self):
+ # The base version of handle_rr() isn't supposed to be called
+ # directly (the argument doesn't matter in this test)
+ self.assertRaises(XfrinException, XfrinState().handle_rr, None)
+
+class TestXfrinInitialSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinInitialSOA()
+
+ def test_handle_rr(self):
+ # normal case
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinFirstData()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual(1234, self.conn._end_serial)
+
+ def test_handle_not_soa(self):
+ # The given RR is not of SOA
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinFirstData(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinFirstData()
+ self.conn._request_type = RRType.IXFR()
+ self.conn._request_serial = 1230 # arbitrarily chosen serial < 1234
+ self.conn._diff = None # should be replaced in the AXFR case
+
+ def test_handle_ixfr_begin_soa(self):
+ self.conn._request_type = RRType.IXFR()
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinIXFRDeleteSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_axfr(self):
+ # If the original type is AXFR, other conditions aren't considered,
+ # and AXFR processing will continue
+ self.conn._request_type = RRType.AXFR()
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+
+ def test_handle_ixfr_to_axfr(self):
+ # Detecting AXFR-compatible IXFR response by seeing a non SOA RR after
+ # the initial SOA. Should switch to AXFR.
+ self.assertFalse(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+ # The Diff for AXFR should be created at this point
+ self.assertNotEqual(None, self.conn._diff)
+
+ def test_handle_ixfr_to_axfr_by_different_soa(self):
+ # An unusual case: Response contains two consecutive SOA but the
+ # serial of the second does not match the requested one. See
+ # the documentation for XfrinFirstData.handle_rr().
+ self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+ self.assertNotEqual(None, self.conn._diff)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDeleteSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRDeleteSOA()
+ # In this state a new Diff object is expected to be created. To
+ # confirm it, we nullify it beforehand.
+ self.conn._diff = None
+
+ def test_handle_rr(self):
+ self.assertTrue(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinIXFRDelete()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual([('delete', self.begin_soa)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_non_soa(self):
+ self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDelete(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ # We need to record the state in 'conn' to check the case where the
+ # state doesn't change.
+ XfrinIXFRDelete().set_xfrstate(self.conn, XfrinIXFRDelete())
+ self.state = self.conn.get_xfrstate()
+
+ def test_handle_delete_rr(self):
+ # Non SOA RRs are simply (going to be) deleted in this state
+ self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual([('delete', self.ns_rrset)],
+ self.conn._diff.get_buffer())
+ # The state shouldn't change
+ self.assertEqual(type(XfrinIXFRDelete()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_soa(self):
+ # SOA in this state means the beginning of added RRs. This SOA
+ # should also be added in the next state, so handle_rr() should return
+ # false.
+ self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual([], self.conn._diff.get_buffer())
+ self.assertEqual(1234, self.conn._current_serial)
+ self.assertEqual(type(XfrinIXFRAddSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAddSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRAddSOA()
+
+ def test_handle_rr(self):
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([('add', soa_rrset)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_non_soa(self):
+ self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAdd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ # We need to record the state in 'conn' to check the case where the
+ # state doesn't change.
+ XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
+ self.conn._current_serial = 1230
+ self.state = self.conn.get_xfrstate()
+
+ def test_handle_add_rr(self):
+ # Non SOA RRs are simply (going to be) added in this state
+ self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual([('add', self.ns_rrset)],
+ self.conn._diff.get_buffer())
+ # The state shouldn't change
+ self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+
+ def test_handle_end_soa(self):
+ self.conn._end_serial = 1234
+ self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ # handle_rr should have caused commit, and the buffer should now be
+ # empty.
+ self.assertEqual([], self.conn._diff.get_buffer())
+
+ def test_handle_new_delete(self):
+ self.conn._end_serial = 1234
+ # An SOA RR whose serial equals the current one means we are moving on
+ # to a new difference sequence, starting with removing that SOA.
+ self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual([], self.conn._diff.get_buffer())
+ self.assertEqual(type(XfrinIXFRDeleteSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_out_of_sync(self):
+ # getting SOA with an inconsistent serial. This is an error.
+ self.conn._end_serial = 1235
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr,
+ self.conn, soa_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFREnd()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertFalse(self.state.finish_message(self.conn))
+
+class TestXfrinAXFR(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinAXFR()
+ self.conn._end_serial = 1234
+
+ def test_handle_rr(self):
+ """
+ Test we can put data inside.
+ """
+ # Put some data inside
+ self.assertTrue(self.state.handle_rr(self.conn, self.a_rrset))
+ # This test uses internal Diff structure to check the behaviour of
+ # XfrinAXFR. Maybe there could be a cleaner way, but it would be more
+ # complicated.
+ self.assertEqual([('add', self.a_rrset)], self.conn._diff.get_buffer())
+ # This SOA terminates the transfer
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ # It should have changed the state
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ # At this point, the data haven't been committed yet
+ self.assertEqual([('add', self.a_rrset), ('add', soa_rrset)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_rr_mismatch_soa(self):
+ """ SOA with inconsistent serial - unexpected, but we accept it.
+
+ """
+ self.assertTrue(self.state.handle_rr(self.conn, begin_soa_rrset))
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+
+ def test_finish_message(self):
+ """
+ Check normal end of message.
+ """
+ # When a message ends, nothing happens usually
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinAXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinAXFREnd()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.conn._diff.add_data(self.a_rrset)
+ self.conn._diff.add_data(soa_rrset)
+ self.assertFalse(self.state.finish_message(self.conn))
+
+ # The data should have been committed
+ self.assertEqual([], self.conn._diff.get_buffer())
+ check_diffs(self.assertEqual, [[('add', self.a_rrset),
+ ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
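+ # A Diff can be committed only once; committing it again must fail.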
+ self.assertRaises(ValueError, self.conn._diff.commit)
+
class TestXfrinConnection(unittest.TestCase):
+ '''Convenient parent class for XFR-protocol tests.
+
+ This class provides common setups and helper methods for protocol related
+ tests on AXFR and IXFR.
+
+ '''
+
def setUp(self):
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
self.sock_map = {}
- self.conn = MockXfrinConnection(self.sock_map, 'example.com.',
- TEST_RRCLASS, TEST_DB_FILE,
- threading.Event(),
+ self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+ TEST_RRCLASS, threading.Event(),
TEST_MASTER_IPV4_ADDRINFO)
self.soa_response_params = {
'questions': [example_soa_question],
@@ -192,6 +596,10 @@ class TestXfrinConnection(unittest.TestCase):
'axfr_after_soa': self._create_normal_response_data
}
self.axfr_response_params = {
+ 'question_1st': default_questions,
+ 'question_2nd': default_questions,
+ 'answer_1st': [soa_rrset, self._create_ns()],
+ 'answer_2nd': default_answers,
'tsig_1st': None,
'tsig_2nd': None
}
@@ -201,6 +609,82 @@ class TestXfrinConnection(unittest.TestCase):
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
+ def _create_normal_response_data(self):
+ # This helper method creates a simple sequence of DNS messages that
+ # forms a valid AXFR transaction. It consists of two messages: the
+ # first one containing SOA, NS, the second containing the trailing SOA.
+ question_1st = self.axfr_response_params['question_1st']
+ question_2nd = self.axfr_response_params['question_2nd']
+ answer_1st = self.axfr_response_params['answer_1st']
+ answer_2nd = self.axfr_response_params['answer_2nd']
+ tsig_1st = self.axfr_response_params['tsig_1st']
+ tsig_2nd = self.axfr_response_params['tsig_2nd']
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=question_1st, answers=answer_1st,
+ tsig_ctx=tsig_1st)
+ self.conn.reply_data += \
+ self.conn.create_response_data(questions=question_2nd,
+ answers=answer_2nd,
+ tsig_ctx=tsig_2nd)
+
+ def _create_soa_response_data(self):
+ # This helper method creates a DNS message that is supposed to be
+ # used as a valid response to SOA queries prior to XFR.
+ # If tsig is True, it tries to verify the query with a locally
+ # created TSIG context (which may or may not succeed) so that the
+ # response will include a TSIG.
+ # If axfr_after_soa is True, it resets the response_generator so that
+ # valid XFR messages will follow.
+
+ verify_ctx = None
+ if self.soa_response_params['tsig']:
+ # xfrin (currently) always uses TCP. strip off the length field.
+ query_data = self.conn.query_data[2:]
+ query_message = Message(Message.PARSE)
+ query_message.from_wire(query_data)
+ verify_ctx = TSIGContext(TSIG_KEY)
+ verify_ctx.verify(query_message.get_tsig_record(), query_data)
+
+ self.conn.reply_data = self.conn.create_response_data(
+ bad_qid=self.soa_response_params['bad_qid'],
+ response=self.soa_response_params['response'],
+ rcode=self.soa_response_params['rcode'],
+ questions=self.soa_response_params['questions'],
+ tsig_ctx=verify_ctx)
+ if self.soa_response_params['axfr_after_soa'] != None:
+ self.conn.response_generator = \
+ self.soa_response_params['axfr_after_soa']
+
+ def _create_broken_response_data(self):
+ # This helper method creates a bogus "DNS message" that only contains
+ # 4 octets of data. The DNS message parser will raise an exception.
+ bogus_data = b'xxxx'
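+ # Prepend the 2-byte length field used for DNS over TCP (in network
+ # byte order) so the data looks like a framed, but bogus, message.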
+ self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
+ self.conn.reply_data += bogus_data
+
+ def _create_a(self, address):
+ rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A(),
+ RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, address))
+ return rrset
+
+ def _create_soa(self, serial):
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ RRTTL(3600))
+ rdata_str = 'm. r. ' + serial + ' 3600 1800 2419200 7200'
+ rrset.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS, rdata_str))
+ return rrset
+
+ def _create_ns(self, nsname='ns.'+TEST_ZONE_NAME_STR):
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(), RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
+ return rrset
+
+class TestAXFR(TestXfrinConnection):
+ def setUp(self):
+ super().setUp()
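+ # Process responses from the InitialSOA state, just as would happen
+ # right after a real query has been sent.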
+ XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
+
def __create_mock_tsig(self, key, error):
# This helper function creates a MockTSIGContext for a given key
# and TSIG error to be used as a result of verify (normally faked
@@ -236,31 +720,81 @@ class TestXfrinConnection(unittest.TestCase):
# to confirm an AF_INET6 socket has been created. A naive application
# tends to assume it's IPv4 only and hardcode AF_INET. This test
# uncovers such a bug.
- c = MockXfrinConnection({}, 'example.com.', TEST_RRCLASS, TEST_DB_FILE,
- threading.Event(),
- TEST_MASTER_IPV6_ADDRINFO)
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, TEST_RRCLASS,
+ threading.Event(), TEST_MASTER_IPV6_ADDRINFO)
c.bind(('::', 0))
c.close()
def test_init_chclass(self):
- c = XfrinConnection({}, 'example.com.', RRClass.CH(), TEST_DB_FILE,
- threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH(),
+ threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
axfrmsg = c._create_query(RRType.AXFR())
self.assertEqual(axfrmsg.get_question()[0].get_class(),
RRClass.CH())
c.close()
- def test_send_query(self):
- def create_msg(query_type):
- msg = Message(Message.RENDER)
- query_id = 0x1035
- msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com."), RRClass.IN(), query_type)
- msg.add_question(query_question)
- return msg
+ def test_create_query(self):
+ def check_query(expected_qtype, expected_auth):
+ '''Helper method to repeat the same pattern of tests'''
+ self.assertEqual(Opcode.QUERY(), msg.get_opcode())
+ self.assertEqual(Rcode.NOERROR(), msg.get_rcode())
+ self.assertEqual(1, msg.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(TEST_ZONE_NAME, msg.get_question()[0].get_name())
+ self.assertEqual(expected_qtype, msg.get_question()[0].get_type())
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ANSWER))
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL))
+ if expected_auth is None:
+ self.assertEqual(0,
+ msg.get_rr_count(Message.SECTION_AUTHORITY))
+ else:
+ self.assertEqual(1,
+ msg.get_rr_count(Message.SECTION_AUTHORITY))
+ auth_rr = msg.get_section(Message.SECTION_AUTHORITY)[0]
+ self.assertEqual(expected_auth.get_name(), auth_rr.get_name())
+ self.assertEqual(expected_auth.get_type(), auth_rr.get_type())
+ self.assertEqual(expected_auth.get_class(),
+ auth_rr.get_class())
+ # In our test scenario the RDATA count must be 1
+ self.assertEqual(1, expected_auth.get_rdata_count())
+ self.assertEqual(1, auth_rr.get_rdata_count())
+ self.assertEqual(expected_auth.get_rdata()[0],
+ auth_rr.get_rdata()[0])
+
+ # Actual tests start here
+ # SOA query
+ msg = self.conn._create_query(RRType.SOA())
+ check_query(RRType.SOA(), None)
+
+ # AXFR query
+ msg = self.conn._create_query(RRType.AXFR())
+ check_query(RRType.AXFR(), None)
+
+ # IXFR query
+ msg = self.conn._create_query(RRType.IXFR())
+ check_query(RRType.IXFR(), begin_soa_rrset)
+ self.assertEqual(1230, self.conn._request_serial)
+
+ def test_create_ixfr_query_fail(self):
+ # In these cases _create_query() will fail to find a valid SOA RR to
+ # insert in the IXFR query, and should raise an exception.
+
+ self.conn._zone_name = Name('no-such-zone.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('partial-match-zone.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('no-soa.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('dup-soa.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+ def test_send_query(self):
def message_has_tsig(data):
# a simple check if the actual data contains a TSIG RR.
# At our level this simple check should suffice; other detailed
@@ -269,14 +803,6 @@ class TestXfrinConnection(unittest.TestCase):
msg.from_wire(data)
return msg.get_tsig_record() is not None
- self.conn._create_query = create_msg
- # soa request
- self.conn._send_query(RRType.SOA())
- self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x06\x00\x01')
- # axfr request
- self.conn._send_query(RRType.AXFR())
- self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
-
# soa request with tsig
self.conn._tsig_key = TSIG_KEY
self.conn._send_query(RRType.SOA())
@@ -288,24 +814,28 @@ class TestXfrinConnection(unittest.TestCase):
def test_response_with_invalid_msg(self):
self.conn.reply_data = b'aaaxxxx'
- self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+ self.assertRaises(XfrinTestException,
+ self.conn._handle_xfrin_responses)
def test_response_with_tsigfail(self):
self.conn._tsig_key = TSIG_KEY
# server tsig check fail, return with RCODE 9 (NOTAUTH)
self.conn._send_query(RRType.SOA())
self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_without_end_soa(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data()
- self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+ # This should result in timeout in the asyncore loop. We emulate
+ # that situation in recv() by emptying the reply data buffer.
+ self.assertRaises(XfrinTestTimeoutException,
+ self.conn._handle_xfrin_responses)
def test_response_bad_qid(self):
self.conn._send_query(RRType.AXFR())
- self.conn.reply_data = self.conn.create_response_data(bad_qid = True)
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_error_code_bad_sig(self):
self.conn._tsig_key = TSIG_KEY
@@ -318,7 +848,7 @@ class TestXfrinConnection(unittest.TestCase):
# validate log message for XfrinException
self.__match_exception(XfrinException,
"TSIG verify fail: BADSIG",
- self._handle_xfrin_response)
+ self.conn._handle_xfrin_responses)
def test_response_bad_qid_bad_key(self):
self.conn._tsig_key = TSIG_KEY
@@ -330,36 +860,29 @@ class TestXfrinConnection(unittest.TestCase):
# validate log message for XfrinException
self.__match_exception(XfrinException,
"TSIG verify fail: BADKEY",
- self._handle_xfrin_response)
+ self.conn._handle_xfrin_responses)
def test_response_non_response(self):
self.conn._send_query(RRType.AXFR())
- self.conn.reply_data = self.conn.create_response_data(response = False)
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.conn.reply_data = self.conn.create_response_data(response=False)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_error_code(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(
rcode=Rcode.SERVFAIL())
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_multi_question(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(
questions=[example_axfr_question, example_axfr_question])
- self.assertRaises(XfrinException, self._handle_xfrin_response)
-
- def test_response_empty_answer(self):
- self.conn._send_query(RRType.AXFR())
- self.conn.reply_data = self.conn.create_response_data(answers=[])
- # Should an empty answer trigger an exception? Even though it's very
- # unusual it's not necessarily invalid. Need to revisit.
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_non_response(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(response = False)
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_soacheck(self):
# we need to defer the creation until we know the QID, which is
@@ -450,30 +973,155 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_normal_response_data
self.conn._shutdown_event.set()
self.conn._send_query(RRType.AXFR())
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_timeout(self):
self.conn.response_generator = self._create_normal_response_data
self.conn.force_time_out = True
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_remote_close(self):
self.conn.response_generator = self._create_normal_response_data
self.conn.force_close = True
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_bad_message(self):
self.conn.response_generator = self._create_broken_response_data
self.conn._send_query(RRType.AXFR())
- self.assertRaises(Exception, self._handle_xfrin_response)
+ self.assertRaises(Exception, self.conn._handle_xfrin_responses)
+
+ def test_axfr_response(self):
+ # A simple normal case: AXFR consists of SOA, NS, then trailing SOA.
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_response_empty_answer(self):
+ '''Test with an empty AXFR answer section.
+
+ This is an unusual response, but there is no reason to reject it.
+ The second message is a complete AXFR response, and transfer should
+ succeed just like the normal case.
+
+ '''
+
+ self.axfr_response_params['answer_1st'] = []
+ self.axfr_response_params['answer_2nd'] = [soa_rrset,
+ self._create_ns(),
+ soa_rrset]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_soa_mismatch(self):
+ '''AXFR response whose begin/end SOAs are not the same.
+
+ What we should do here is moot; for now we accept it, as does BIND 9.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.AXFR())],
+ # begin serial=1230, end serial=1234. end will be used.
+ answers=[begin_soa_rrset, ns_rr, a_rr, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_extra(self):
+ '''Test with an extra RR after the end of AXFR session.
+
+ The session should be rejected, and nothing should be committed.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.AXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_qname_mismatch(self):
+ '''AXFR response with a mismatched question name.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ self.axfr_response_params['question_1st'] = \
+ [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR())]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_qclass_mismatch(self):
+ '''AXFR response with a mismatched RR class.
+
+ Our implementation accepts that, as does BIND 9.
- def test_response(self):
- # normal case.
+ '''
+ self.axfr_response_params['question_1st'] = \
+ [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.AXFR())]
self.conn.response_generator = self._create_normal_response_data
self.conn._send_query(RRType.AXFR())
- # two SOAs, and only these have been transfered. the 2nd SOA is just
- # a marker, so only 1 RR has been provided in the iteration.
- self.assertEqual(self._handle_xfrin_response(), 1)
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_qtype_mismatch(self):
+ '''AXFR response with a mismatched RR type.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ # returning IXFR in question to AXFR query
+ self.axfr_response_params['question_1st'] = \
+ [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.IXFR())]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_empty_question(self):
+ '''AXFR response with an empty question.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ self.axfr_response_params['question_1st'] = []
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
def test_do_xfrin(self):
self.conn.response_generator = self._create_normal_response_data
@@ -487,9 +1135,10 @@ class TestXfrinConnection(unittest.TestCase):
lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
- # We use two messages in the tests. The same context should have been
- # usef for both.
- self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
def test_do_xfrin_with_tsig_fail(self):
# TSIG verify will fail for the first message. xfrin should fail
@@ -569,10 +1218,10 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_broken_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
- def test_do_xfrin_dberror(self):
- # DB file is under a non existent directory, so its creation will fail,
- # which will make the transfer fail.
- self.conn._db_file = "not_existent/" + TEST_DB_FILE
+ def test_do_xfrin_datasrc_error(self):
+ # Emulate failure in the data source client on commit.
+ self.conn._datasrc_client.force_fail = True
+ self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
def test_do_soacheck_and_xfrin(self):
@@ -598,10 +1247,7 @@ class TestXfrinConnection(unittest.TestCase):
def test_do_soacheck_broken_response(self):
self.conn.response_generator = self._create_broken_response_data
- # XXX: TODO: this test failed here, should xfr not raise an
- # exception but simply drop and return FAIL?
- #self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
- self.assertRaises(MessageTooShort, self.conn.do_xfrin, True)
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
def test_do_soacheck_badqid(self):
# the QID mismatch would internally trigger a XfrinException exception,
@@ -610,59 +1256,396 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
- def _handle_xfrin_response(self):
- # This helper methods iterates over all RRs (excluding the ending SOA)
- # transferred, and simply returns the number of RRs. The return value
- # may be used an assertion value for test cases.
- rrs = 0
- for rr in self.conn._handle_xfrin_response():
- rrs += 1
- return rrs
+class TestIXFRResponse(TestXfrinConnection):
+ def setUp(self):
+ super().setUp()
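+ # Emulate the state right after an IXFR query has been sent: fix the
+ # QID, remember the requested serial and type, use a mock data source
+ # client, and start response handling from the InitialSOA state.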
+ self.conn._query_id = self.conn.qid = 1035
+ self.conn._request_serial = 1230
+ self.conn._request_type = RRType.IXFR()
+ self._zone_name = TEST_ZONE_NAME
+ self.conn._datasrc_client = MockDataSourceClient()
+ XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
- def _create_normal_response_data(self):
- # This helper method creates a simple sequence of DNS messages that
- # forms a valid XFR transaction. It consists of two messages, each
- # containing just a single SOA RR.
- tsig_1st = self.axfr_response_params['tsig_1st']
- tsig_2nd = self.axfr_response_params['tsig_2nd']
- self.conn.reply_data = self.conn.create_response_data(tsig_ctx=tsig_1st)
- self.conn.reply_data += \
- self.conn.create_response_data(tsig_ctx=tsig_2nd)
+ def test_ixfr_response(self):
+ '''The simplest form of IXFR response.
- def _create_soa_response_data(self):
- # This helper method creates a DNS message that is supposed to be
- # used a valid response to SOA queries prior to XFR.
- # If tsig is True, it tries to verify the query with a locally
- # created TSIG context (which may or may not succeed) so that the
- # response will include a TSIG.
- # If axfr_after_soa is True, it resets the response_generator so that
- # a valid XFR messages will follow.
-
- verify_ctx = None
- if self.soa_response_params['tsig']:
- # xfrin (curreently) always uses TCP. strip off the length field.
- query_data = self.conn.query_data[2:]
- query_message = Message(Message.PARSE)
- query_message.from_wire(query_data)
- verify_ctx = TSIGContext(TSIG_KEY)
- verify_ctx.verify(query_message.get_tsig_record(), query_data)
+ It simply updates the zone's SOA one time.
+ '''
self.conn.reply_data = self.conn.create_response_data(
- bad_qid=self.soa_response_params['bad_qid'],
- response=self.soa_response_params['response'],
- rcode=self.soa_response_params['rcode'],
- questions=self.soa_response_params['questions'],
- tsig_ctx=verify_ctx)
- if self.soa_response_params['axfr_after_soa'] != None:
- self.conn.response_generator = \
- self.soa_response_params['axfr_after_soa']
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_multi_sequences(self):
+ '''Similar to the previous case, but with multiple diff seqs.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset,
+ # removing one A in serial 1230
+ begin_soa_rrset, self._create_a('192.0.2.1'),
+ # adding one A in serial 1231
+ self._create_soa('1231'), self._create_a('192.0.2.2'),
+ # removing one A in serial 1231
+ self._create_soa('1231'), self._create_a('192.0.2.3'),
+ # adding one A in serial 1232
+ self._create_soa('1232'), self._create_a('192.0.2.4'),
+ # removing one A in serial 1232
+ self._create_soa('1232'), self._create_a('192.0.2.5'),
+ # adding one A in serial 1234
+ soa_rrset, self._create_a('192.0.2.6'),
+ soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset),
+ ('delete', self._create_a('192.0.2.1')),
+ ('add', self._create_soa('1231')),
+ ('add', self._create_a('192.0.2.2'))],
+ [('delete', self._create_soa('1231')),
+ ('delete', self._create_a('192.0.2.3')),
+ ('add', self._create_soa('1232')),
+ ('add', self._create_a('192.0.2.4'))],
+ [('delete', self._create_soa('1232')),
+ ('delete', self._create_a('192.0.2.5')),
+ ('add', soa_rrset),
+ ('add', self._create_a('192.0.2.6'))]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_multi_messages(self):
+ '''Similar to the first case, but RRs span over multiple messages.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset])
+ self.conn.reply_data += self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_broken(self):
+ '''Test with a broken response.
+
+ '''
+ # SOA sequence is out-of-sync
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ # no diffs should have been committed
+ check_diffs(self.assertEqual,
+ [], self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_extra(self):
+ '''Test with an extra RR after the end of IXFR diff sequences.
+
+ IXFR should be rejected, but complete diff sequences should be
+ committed; it's not clear whether this is compliant with the protocol
+ specification, but it is how BIND 9 works and we do the same.
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset,
+ self._create_a('192.0.2.1')])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response(self):
+ '''AXFR-style IXFR response.
+
+ The response is an AXFR-style full zone transfer rather than a diff.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ # The SOA should be added exactly once, and in our implementation
+ # it should be added at the end of the sequence.
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response_mismatch_soa(self):
+ '''AXFR-style IXFR response, but the two SOAs are not the same.
+
+ In the current implementation, we accept it and use the second SOA.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, begin_soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr),
+ ('add', begin_soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response_extra(self):
+ '''Test with an extra RR after the end of AXFR-style IXFR session.
+
+ The session should be rejected, and nothing should be committed.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+class TestIXFRSession(TestXfrinConnection):
+ '''Tests for a full IXFR session (query and response).
+
+ Detailed corner cases should have been covered in test_create_query()
+ and TestIXFRResponse, so we'll only check some typical cases to confirm
+ the general logic flow.
+ '''
+ def setUp(self):
+ super().setUp()
- def _create_broken_response_data(self):
- # This helper method creates a bogus "DNS message" that only contains
- # 4 octets of data. The DNS message parser will raise an exception.
- bogus_data = b'xxxx'
- self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
- self.conn.reply_data += bogus_data
+ def test_do_xfrin(self):
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
+ # Check some details of the IXFR protocol processing
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ # Check if the query was IXFR.
+ qdata = self.conn.query_data[2:]
+ qmsg = Message(Message.PARSE)
+ qmsg.from_wire(qdata, len(qdata))
+ self.assertEqual(1, qmsg.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(TEST_ZONE_NAME, qmsg.get_question()[0].get_name())
+ self.assertEqual(RRType.IXFR(), qmsg.get_question()[0].get_type())
+
+ def test_do_xfrin_fail(self):
+ '''IXFR fails due to a protocol error.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.conn.response_generator = create_ixfr_response
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+ def test_do_xfrin_broken_response(self):
+ '''IXFR fails due to a bogus DNS message.
+
+ '''
+ self._create_broken_response_data()
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+class TestXFRSessionWithSQLite3(TestXfrinConnection):
+ '''Tests for XFR sessions using an SQLite3 DB.
+
+ These are provided mainly to confirm the implementation actually works
+ in an environment closer to real operation. So we
+ only check a few common cases; other details are tested using mock
+ data sources.
+
+ '''
+ def setUp(self):
+ self.sqlite3db_src = TESTDATA_SRCDIR + '/example.com.sqlite3'
+ self.sqlite3db_obj = TESTDATA_OBJDIR + '/example.com.sqlite3.copy'
+ self.empty_sqlite3db_obj = TESTDATA_OBJDIR + '/empty.sqlite3'
+ self.sqlite3db_cfg = "{ \"database_file\": \"" +\
+ self.sqlite3db_obj + "\"}"
+ super().setUp()
+ if os.path.exists(self.sqlite3db_obj):
+ os.unlink(self.sqlite3db_obj)
+ if os.path.exists(self.empty_sqlite3db_obj):
+ os.unlink(self.empty_sqlite3db_obj)
+ shutil.copyfile(self.sqlite3db_src, self.sqlite3db_obj)
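+ # Create an SQLite3 data source client for the writable copy of the
+ # test DB; the config is a JSON string naming the database file.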
+ self.conn._datasrc_client = DataSourceClient("sqlite3",
+ self.sqlite3db_cfg)
+
+ def tearDown(self):
+ if os.path.exists(self.sqlite3db_obj):
+ os.unlink(self.sqlite3db_obj)
+ if os.path.exists(self.empty_sqlite3db_obj):
+ os.unlink(self.empty_sqlite3db_obj)
+
+ def get_zone_serial(self):
+ result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+ self.assertEqual(DataSourceClient.SUCCESS, result)
+ result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual(1, soa.get_rdata_count())
+ return get_soa_serial(soa.get_rdata()[0])
+
+ def record_exist(self, name, type):
+ result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+ self.assertEqual(DataSourceClient.SUCCESS, result)
+ result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+ return result == ZoneFinder.SUCCESS
+
+ def test_do_ixfrin_sqlite3(self):
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+
+ # Confirm xfrin succeeds and SOA is updated
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1234, self.get_zone_serial())
+
+ def test_do_ixfrin_sqlite3_fail(self):
+ '''Similar to the previous test, but xfrin fails due to error.
+
+ Check the DB is not changed.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.conn.response_generator = create_ixfr_response
+
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1230, self.get_zone_serial())
+
+ def test_do_ixfrin_nozone_sqlite3(self):
+ self.conn._zone_name = Name('nosuchzone.example')
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ # This should fail even before starting state transition
+ self.assertEqual(None, self.conn.get_xfrstate())
+
+ def axfr_check(self, type):
+ '''Common checks for AXFR and AXFR-style IXFR
+
+ '''
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, type)],
+ answers=[soa_rrset, self._create_ns(), soa_rrset])
+ self.conn.response_generator = create_response
+
+ # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
+ self.assertEqual(1234, self.get_zone_serial())
+ self.assertFalse(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+
+ def test_do_ixfrin_axfr_sqlite3(self):
+ '''AXFR-style IXFR.
+
+ '''
+ self.axfr_check(RRType.IXFR())
+
+ def test_do_axfrin_sqlite3(self):
+ '''AXFR.
+
+ '''
+ self.axfr_check(RRType.AXFR())
+
+ def axfr_failure_check(self, type):
+ '''Similar to the previous two tests, but xfrin fails due to error.
+
+ Check the DB is not changed.
+
+ '''
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, type)],
+ answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
+ self.conn.response_generator = create_response
+
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+
+ def test_do_xfrin_axfr_sqlite3_fail(self):
+ '''Failure case for AXFR-style IXFR.
+
+ '''
+ self.axfr_failure_check(RRType.IXFR())
+
+ def test_do_axfrin_sqlite3_fail(self):
+ '''Failure case for AXFR.
+
+ '''
+ self.axfr_failure_check(RRType.AXFR())
+
+ def test_do_axfrin_nozone_sqlite3(self):
+ '''AXFR test with an empty SQLite3 DB file, thus no target zone there.
+
+ For now, we provide backward compatible behavior: xfrin will create
+ the zone (even setting up the entire schema) in the empty DB file.
+ Note: a future version of this test will make it fail.
+
+ '''
+ self.conn._db_file = self.empty_sqlite3db_obj
+ self.conn._datasrc_client = DataSourceClient(
+ "sqlite3",
+ "{ \"database_file\": \"" + self.empty_sqlite3db_obj + "\"}")
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.AXFR())],
+ answers=[soa_rrset, self._create_ns(), soa_rrset])
+ self.conn.response_generator = create_response
+ self.conn._zone_name = Name('example.com')
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
+ self.assertEqual(type(XfrinAXFREnd()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual(1234, self.get_zone_serial())
+ self.assertFalse(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
class TestXfrinRecorder(unittest.TestCase):
def setUp(self):
@@ -789,6 +1772,8 @@ class TestXfrin(unittest.TestCase):
self.args)['result'][0], 0)
self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
+ # By default we use AXFR (for now)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_short_command1(self):
# try it when only specifying the zone name (of unknown zone)
@@ -901,6 +1886,8 @@ class TestXfrin(unittest.TestCase):
self.xfr.xfrin_started_master_addr)
self.assertEqual(int(TEST_MASTER_PORT),
self.xfr.xfrin_started_master_port)
+ # By default we use AXFR (for now)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def test_command_handler_notify(self):
# at this level, refresh is no different than retransfer.
@@ -909,7 +1896,7 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 1)
- def test_command_handler_notify_known_zone(self):
+ def test_command_handler_notify_known_zone_bad_addr(self):
# try it with a known zone
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
@@ -921,18 +1908,39 @@ class TestXfrin(unittest.TestCase):
}
]}
self.xfr.config_handler(zones)
+ # the command should now fail
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+ def test_command_handler_notify_known_zone(self):
+ # try it with a known zone
+ self.args['master'] = TEST_MASTER_IPV6_ADDRESS
+
+ # with a zone configuration that has a matching master address.
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV6_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 0)
+ # Note: The rest of the tests won't pass due to the change in #1298
+ # We should probably simply remove the test cases, but for now we
+ # just comment them out. (Note also that the comment about 'not
+ # from the config' is now wrong, because we used the matching address.)
+ #
# and see if we used the address from the command, and not from
# the config
# This is actually NOT the address given in the command, which
# would at this point not make sense, see the TODO in
# xfrin.py.in Xfrin.command_handler())
- self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
- self.xfr.xfrin_started_master_addr)
- self.assertEqual(int(TEST_MASTER_PORT),
- self.xfr.xfrin_started_master_port)
+# self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+# self.xfr.xfrin_started_master_addr)
+# self.assertEqual(int(TEST_MASTER_PORT),
+# self.xfr.xfrin_started_master_port)
def test_command_handler_unknown(self):
self.assertEqual(self.xfr.command_handler("xxx", None)['result'][0], 1)
@@ -955,20 +1963,24 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
else:
self.assertIsNone(zone_info.tsig_key)
- if 'ixfr_disabled' in zone_config and\
- zone_config.get('ixfr_disabled'):
- self.assertTrue(zone_info.ixfr_disabled)
+ if 'use_ixfr' in zone_config and\
+ zone_config.get('use_ixfr'):
+ self.assertTrue(zone_info.use_ixfr)
else:
# if not set, should default to False
- self.assertFalse(zone_info.ixfr_disabled)
+ self.assertFalse(zone_info.use_ixfr)
- def test_command_handler_zones(self):
+ def test_config_handler_zones(self):
+ # This test passes a number of good and bad configs, and checks whether
+ # the values are reflected in the structure that will dictate the
+ # actual behaviour. It also checks if bad values are correctly
+ # handled
config1 = { 'transfers_in': 3,
'zones': [
{ 'name': 'test.example.',
'master_addr': '192.0.2.1',
'master_port': 53,
- 'ixfr_disabled': False
+ 'use_ixfr': False
}
]}
self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
@@ -980,7 +1992,7 @@ class TestXfrin(unittest.TestCase):
'master_addr': '192.0.2.2',
'master_port': 53,
'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
- 'ixfr_disabled': True
+ 'use_ixfr': True
}
]}
self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
@@ -1090,6 +2102,50 @@ class TestXfrin(unittest.TestCase):
# since this has failed, we should still have the previous config
self._check_zones_config(config2)
+ def test_config_handler_zones_default(self):
+ # Check that some default config values apply. Using a separate
+ # test case for a fresh xfr object.
+ config = { 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.1',
+ 'master_port': 53,
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+ self._check_zones_config(config)
+
+ def common_ixfr_setup(self, xfr_mode, use_ixfr):
+ # This helper method explicitly sets up a zone configuration with
+ # use_ixfr, and invokes either retransfer or refresh.
+ # Shared by some of the following test cases.
+ config = {'zones': [
+ {'name': 'example.com.',
+ 'master_addr': '192.0.2.1',
+ 'use_ixfr': use_ixfr}]}
+ self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+ self.assertEqual(self.xfr.command_handler(xfr_mode,
+ self.args)['result'][0], 0)
+
+ def test_command_handler_retransfer_ixfr_enabled(self):
+ # If IXFR is explicitly enabled in config, IXFR will be used
+ self.common_ixfr_setup('retransfer', True)
+ self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_refresh_ixfr_enabled(self):
+ # Same for refresh
+ self.common_ixfr_setup('refresh', True)
+ self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_retransfer_ixfr_disabled(self):
+ # Similar to the previous case, but explicitly disabled. AXFR should
+ # be used.
+ self.common_ixfr_setup('retransfer', False)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_refresh_ixfr_disabled(self):
+ # Same for refresh
+ self.common_ixfr_setup('refresh', False)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def raise_interrupt():
raise KeyboardInterrupt()
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index a77a383..1f5d9a1 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -28,7 +28,9 @@ from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
from isc.notify import notify_out
import isc.util.process
+from isc.datasrc import DataSourceClient, ZoneFinder
import isc.net.parse
+from isc.xfrin.diff import Diff
from isc.log_messages.xfrin_messages import *
isc.log.init("b10-xfrin")
@@ -62,6 +64,9 @@ ZONE_MANAGER_MODULE_NAME = 'Zonemgr'
REFRESH_FROM_ZONEMGR = 'refresh_from_zonemgr'
ZONE_XFRIN_FAILED = 'zone_xfrin_failed'
+# Constants for debug levels, to be removed when we have #1074.
+DBG_XFRIN_TRACE = 3
+
# These two default are currently hard-coded. For config this isn't
# necessary, but we need these defaults for optional command arguments
# (TODO: have similar support to get default values for command
@@ -77,6 +82,11 @@ XFRIN_FAIL = 1
class XfrinException(Exception):
pass
+class XfrinProtocolError(Exception):
+ '''An exception raised for errors encountered in xfrin protocol handling.
+ '''
+ pass
+
class XfrinZoneInfoException(Exception):
"""This exception is raised if there is an error in the given
configuration (part), or when a command does not have a required
@@ -112,29 +122,358 @@ def _check_zone_class(zone_class_str):
except InvalidRRClass as irce:
raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
+def get_soa_serial(soa_rdata):
+ '''Extract the serial field of an SOA RDATA and return it as an integer.
+
+ We don't have to be very efficient here, so we first dump the entire RDATA
+ as a string and convert the corresponding field. This should be
+ sufficient in practice, but may not always work when the MNAME or RNAME
+ contains an (escaped) space character in their labels. Ideally there
+ should be a more direct and convenient way to get access to the SOA
+ fields.
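+
+ For example (illustrative values only), an SOA RDATA of
+ 'ns.example. admin.example. 1234 3600 1800 2419200 7200' yields 1234.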
+ '''
+ return int(soa_rdata.to_text().split()[2])
+
+class XfrinState:
+ '''
+ The states of the incoming *XFR state machine.
+
+ We (will) handle both IXFR and AXFR with a single integrated state
+ machine because they cannot be distinguished immediately - an AXFR
+ response to an IXFR request can only be detected when the first two (2)
+ response RRs have already been received.
+
+ The following diagram summarizes the state transitions. After sending
+ the query, xfrin starts the process in the InitialSOA state (every
+ IXFR/AXFR response begins with an SOA). When it reaches IXFREnd
+ or AXFREnd, the process completes successfully.
+
+ (AXFR or
+ (recv SOA) AXFR-style IXFR) (SOA, add)
+ InitialSOA------->FirstData------------->AXFR--------->AXFREnd
+ | | ^ (post xfr
+ | | | checks, then
+ | +--+ commit)
+ | (non SOA, add)
+ |
+ | (non SOA, delete)
+ (pure IXFR,| +-------+
+ keep handling)| (Delete SOA) V |
+ + ->IXFRDeleteSOA------>IXFRDelete--+
+ ^ |
+ (see SOA, not end, | (see SOA)|
+ commit, keep handling) | |
+ | V
+ +---------IXFRAdd<----------+IXFRAddSOA
+ (non SOA, add)| ^ | (Add SOA)
+ ----------+ |
+ |(see SOA w/ end serial, commit changes)
+ V
+ IXFREnd
+
+ Note that changes are committed for every "difference sequence"
+ (i.e. changes for one SOA update). This means when an IXFR response
+ contains multiple difference sequences and something goes wrong
+    after several commits, these changes have been published and are visible
+ to clients even if the IXFR session is subsequently aborted.
+ It is not clear if this is valid in terms of the protocol specification.
+ Section 4 of RFC 1995 states:
+
+ An IXFR client, should only replace an older version with a newer
+ version after all the differences have been successfully processed.
+
+ If this "replacement" is for the changes of one difference sequence
+ and "all the differences" mean the changes for that sequence, this
+    implementation strictly follows what the RFC states. If this is for
+ the entire IXFR response (that may contain multiple sequences),
+ we should implement it with one big transaction and one final commit
+ at the very end.
+
+ For now, we implement it with multiple smaller commits for two
+ reasons. First, this is what BIND 9 does, and we generally port
+ the implementation logic here. BIND 9 has been supporting IXFR
+ for many years, so the fact that it still behaves this way
+ probably means it at least doesn't cause a severe operational
+ problem in practice. Second, especially because BIND 10 would
+    often use a database backend, a larger transaction could cause
+ undesirable effects, e.g. suspending normal lookups for a longer
+ period depending on the characteristics of the database. Even if
+    we find something wrong in a later sequence and abort the
+ session, we can start another incremental update from what has
+ been validated, or we can switch to AXFR to replace the zone
+ completely.
+
+ This implementation uses the state design pattern, where each state
+ is represented as a subclass of the base XfrinState class. Each concrete
+ subclass of XfrinState is assumed to define two methods: handle_rr() and
+    finish_message(). These methods handle a specific part of the XFR protocol
+ and (if necessary) perform the state transition.
+
+ Conceptually, XfrinState and its subclasses are a "friend" of
+ XfrinConnection and are assumed to be allowed to access its internal
+ information (even though Python does not have a strict access control
+ between different classes).
+
+ The XfrinState and its subclasses are designed to be stateless, and
+ can be used as singleton objects. For now, however, we always instantiate
+ a new object for every state transition, partly because the introduction
+    of singletons would make the code a bit more complicated, and partly because
+    the overhead of object instantiation wouldn't be significant for xfrin.
+
+ '''
+ def set_xfrstate(self, conn, new_state):
+        '''Set the XfrinConnection to a given new state.
+
+ As a "friend" class, this method intentionally gets access to the
+ connection's "private" method.
+
+ '''
+ conn._XfrinConnection__set_xfrstate(new_state)
+
+ def handle_rr(self, conn):
+ '''Handle one RR of an XFR response message.
+
+ Depending on the state, the RR is generally added or deleted in the
+ corresponding data source, or in some special cases indicates
+        a specific transition, such as starting a new IXFR difference
+ sequence or completing the session.
+
+        Each subclass has its own specific behavior for this method, so
+ there is no default definition. If the base class version
+        is called, it's a bug in the caller, and it's reported via
+ an XfrinException exception.
+
+ This method returns a boolean value: True if the given RR was
+ fully handled and the caller should go to the next RR; False
+ if the caller needs to call this method with the (possibly) new
+ state for the same RR again.
+
+ '''
+ raise XfrinException("Internal bug: " +
+ "XfrinState.handle_rr() called directly")
+
+ def finish_message(self, conn):
+ '''Perform any final processing after handling all RRs of a response.
+
+ This method then returns a boolean indicating whether to continue
+        receiving messages. Unless it's at the end of the entire XFR
+ session, we should continue, so this default method simply returns
+ True.
+
+ '''
+ return True
+
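As an editorial sketch (not in the patch), this is how the state objects end up being driven for every RR of a response; XfrinConnection._handle_xfrin_responses() further down in this diff does essentially the following:

    # 'conn' is an XfrinConnection and 'answer_rrs' an iterable of RRsets
    # from one response message; both names are illustrative only.
    for rr in answer_rrs:
        rr_handled = False
        while not rr_handled:
            # a state may switch the connection to a new state and ask for
            # the same RR to be examined again under that new state
            rr_handled = conn.get_xfrstate().handle_rr(conn, rr)
    keep_reading = conn.get_xfrstate().finish_message(conn)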
+class XfrinInitialSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ raise XfrinProtocolError('First RR in zone transfer must be SOA ('
+ + rr.get_type().to_text() + ' received)')
+ conn._end_serial = get_soa_serial(rr.get_rdata()[0])
+
+ # FIXME: we need to check the serial is actually greater than ours.
+ # To do so, however, we need to implement serial number arithmetic.
+ # Although it wouldn't be a big task, we'll leave it for a separate
+ # task for now. (Always performing xfr could be inefficient, but
+ # shouldn't do any harm otherwise)
+
+ self.set_xfrstate(conn, XfrinFirstData())
+ return True
+
+class XfrinFirstData(XfrinState):
+ def handle_rr(self, conn, rr):
+ '''Handle the first RR after initial SOA in an XFR session.
+
+ This state happens exactly once in an XFR session, where
+ we decide whether it's incremental update ("real" IXFR) or
+ non incremental update (AXFR or AXFR-style IXFR).
+ If we initiated IXFR and the transfer begins with two SOAs
+ (the serial of the second one being equal to our serial),
+ it's incremental; otherwise it's non incremental.
+
+        This method always returns False (unlike many other handle_rr()
+ methods) because this first RR must be examined again in the
+ determined update context.
+
+ Note that in the non incremental case the RR should normally be
+        something other than SOA, but it's still possible it's an SOA with a
+ different serial than ours. The only possible interpretation at
+        this point is that it's a non incremental update that only consists
+        of the SOA RR. It will result in a broken zone (for example, it
+ wouldn't even contain an apex NS) and should be rejected at post
+ XFR processing, but in terms of the XFR session processing we
+ accept it and move forward.
+
+ Note further that, in the half-broken SOA-only transfer case,
+ these two SOAs are supposed to be the same as stated in Section 2.2
+ of RFC 5936. We don't check that condition here, either; we'll
+ leave whether and how to deal with that situation to the end of
+ the processing of non incremental update. See also a related
+ discussion at the IETF dnsext wg:
+ http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+
+ '''
+ if conn._request_type == RRType.IXFR() and \
+ rr.get_type() == RRType.SOA() and \
+ conn._request_serial == get_soa_serial(rr.get_rdata()[0]):
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_INCREMENTAL_RESP,
+ conn.zone_str())
+ self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+ else:
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_NONINCREMENTAL_RESP,
+ conn.zone_str())
+            # We are now going to add RRs to the new zone. We need to create
+            # a Diff object. It will be used throughout the XFR session.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name, True)
+ self.set_xfrstate(conn, XfrinAXFR())
+ return False
+
+class XfrinIXFRDeleteSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+            # this shouldn't happen; should this occur, it means an internal
+ # bug.
+ raise XfrinException(rr.get_type().to_text() +
+ ' RR is given in IXFRDeleteSOA state')
+ # This is the beginning state of one difference sequence (changes
+ # for one SOA update). We need to create a new Diff object now.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+ conn._diff.delete_data(rr)
+ self.set_xfrstate(conn, XfrinIXFRDelete())
+ return True
+
+class XfrinIXFRDelete(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() == RRType.SOA():
+ # This is the only place where current_serial is set
+ conn._current_serial = get_soa_serial(rr.get_rdata()[0])
+ self.set_xfrstate(conn, XfrinIXFRAddSOA())
+ return False
+ conn._diff.delete_data(rr)
+ return True
+
+class XfrinIXFRAddSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+            # this shouldn't happen; should this occur, it means an internal
+ # bug.
+ raise XfrinException(rr.get_type().to_text() +
+ ' RR is given in IXFRAddSOA state')
+ conn._diff.add_data(rr)
+ self.set_xfrstate(conn, XfrinIXFRAdd())
+ return True
+
+class XfrinIXFRAdd(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() == RRType.SOA():
+ soa_serial = get_soa_serial(rr.get_rdata()[0])
+ if soa_serial == conn._end_serial:
+ conn._diff.commit()
+ self.set_xfrstate(conn, XfrinIXFREnd())
+ return True
+ elif soa_serial != conn._current_serial:
+ raise XfrinProtocolError('IXFR out of sync: expected ' +
+ 'serial ' +
+ str(conn._current_serial) +
+ ', got ' + str(soa_serial))
+ else:
+ conn._diff.commit()
+ self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+ return False
+ conn._diff.add_data(rr)
+ return True
+
+class XfrinIXFREnd(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after the end of IXFR diffs: ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ '''Final processing after processing an entire IXFR session.
+
+ There will be more actions here, but for now we simply return False,
+        indicating there will be no more messages to receive.
+
+ '''
+ return False
+
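To make the diagram in the XfrinState docstring concrete, here is an editorial walk-through (not part of the patch) of an IXFR response carrying two difference sequences for a zone going from serial 1 to 3, with the state that consumes each RR noted in the comments; the record contents are made up:

    ixfr_response_rrs = [
        ('SOA', 3),   # InitialSOA: remembers 3 as the end serial
        ('SOA', 1),   # FirstData -> IXFRDeleteSOA: new Diff, delete this SOA
        ('A', None),  # IXFRDelete: RRs removed in the 1->2 change
        ('SOA', 2),   # IXFRDelete -> IXFRAddSOA: add this SOA
        ('A', None),  # IXFRAdd: RRs added in the 1->2 change
        ('SOA', 2),   # IXFRAdd: commit diff #1, then IXFRDeleteSOA for diff #2
        ('A', None),  # IXFRDelete: RRs removed in the 2->3 change
        ('SOA', 3),   # IXFRDelete -> IXFRAddSOA: add this SOA
        ('A', None),  # IXFRAdd: RRs added in the 2->3 change
        ('SOA', 3),   # IXFRAdd: serial equals end serial, commit, -> IXFREnd
    ]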
+class XfrinAXFR(XfrinState):
+ def handle_rr(self, conn, rr):
+ """
+ Handle the RR by putting it into the zone.
+ """
+ conn._diff.add_data(rr)
+ if rr.get_type() == RRType.SOA():
+ # SOA means end. Don't commit it yet - we need to perform
+ # post-transfer checks
+
+ soa_serial = get_soa_serial(rr.get_rdata()[0])
+ if conn._end_serial != soa_serial:
+ logger.warn(XFRIN_AXFR_INCONSISTENT_SOA, conn.zone_str(),
+ conn._end_serial, soa_serial)
+
+ self.set_xfrstate(conn, XfrinAXFREnd())
+ # Yes, we've eaten this RR.
+ return True
+
+class XfrinAXFREnd(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after the end of AXFR: ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ """
+ Final processing after processing an entire AXFR session.
+
+ In this process all the AXFR changes are committed to the
+ data source.
+
+ There might be more actions here, but for now we simply return False,
+        indicating there will be no more messages to receive.
+
+ """
+ conn._diff.commit()
+ return False
+
class XfrinConnection(asyncore.dispatcher):
'''Do xfrin in this class. '''
def __init__(self,
- sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addrinfo, tsig_key = None, verbose = False,
- idle_timeout = 60):
- ''' idle_timeout: max idle time for read data from socket.
- db_file: specify the data source file.
- check_soa: when it's true, check soa first before sending xfr query
+ sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addrinfo, tsig_key=None,
+ idle_timeout=60):
+        '''Constructor of the XfrinConnection class.
+
+        idle_timeout: max idle time for reading data from the socket.
+ datasrc_client: the data source client object used for the XFR session.
+ This will eventually replace db_file completely.
+
'''
asyncore.dispatcher.__init__(self, map=sock_map)
- self.create_socket(master_addrinfo[0], master_addrinfo[1])
+
+ # The XFR state. Conceptually this is purely private, so we emphasize
+ # the fact by the double underscore. Other classes are assumed to
+ # get access to this via get_xfrstate(), and only XfrinState classes
+ # are assumed to be allowed to modify it via __set_xfrstate().
+ self.__state = None
+
+ # Requested transfer type (RRType.AXFR or RRType.IXFR). The actual
+ # transfer type may differ due to IXFR->AXFR fallback:
+ self._request_type = None
+
+ # Zone parameters
self._zone_name = zone_name
- self._sock_map = sock_map
self._rrclass = rrclass
- self._db_file = db_file
+
+ # Data source handler
+ self._datasrc_client = datasrc_client
+
+ self.create_socket(master_addrinfo[0], master_addrinfo[1])
+ self._sock_map = sock_map
self._soa_rr_count = 0
self._idle_timeout = idle_timeout
self.setblocking(1)
self._shutdown_event = shutdown_event
- self._verbose = verbose
self._master_address = master_addrinfo[2]
self._tsig_key = tsig_key
self._tsig_ctx = None
@@ -145,6 +484,16 @@ class XfrinConnection(asyncore.dispatcher):
def __create_tsig_ctx(self, key):
return TSIGContext(key)
+ def __set_xfrstate(self, new_state):
+ self.__state = new_state
+
+ def get_xfrstate(self):
+ return self.__state
+
+ def zone_str(self):
+        '''A convenience function for logging the zone name and class.'''
+ return self._zone_name.to_text() + '/' + str(self._rrclass)
+
def connect_to_master(self):
'''Connect to master in TCP.'''
@@ -155,17 +504,67 @@ class XfrinConnection(asyncore.dispatcher):
logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
return False
+ def _get_zone_soa(self):
+ result, finder = self._datasrc_client.find_zone(self._zone_name)
+ if result != DataSourceClient.SUCCESS:
+ raise XfrinException('Zone not found in the given data ' +
+ 'source: ' + self.zone_str())
+ result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ if result != ZoneFinder.SUCCESS:
+ raise XfrinException('SOA RR not found in zone: ' +
+ self.zone_str())
+ # Especially for database-based zones, a working zone may be in
+ # a broken state where it has more than one SOA RR. We proactively
+ # check the condition and abort the xfr attempt if we identify it.
+ if soa_rrset.get_rdata_count() != 1:
+ raise XfrinException('Invalid number of SOA RRs for ' +
+ self.zone_str() + ': ' +
+ str(soa_rrset.get_rdata_count()))
+ return soa_rrset
+
def _create_query(self, query_type):
- '''Create dns query message. '''
+ '''Create an XFR-related query message.
+
+ query_type is either SOA, AXFR or IXFR. For type IXFR, it searches
+ the associated data source for the current SOA record to include
+ it in the query. If the corresponding zone or the SOA record
+ cannot be found, it raises an XfrinException exception. Note that
+        this may not necessarily be a broken configuration; for the first attempt
+        of transfer the secondary may not have any bootstrap zone
+ information, in which case IXFR simply won't work. The xfrin
+ should then fall back to AXFR. _request_serial is recorded for
+ later use.
+ '''
msg = Message(Message.RENDER)
query_id = random.randint(0, 0xFFFF)
self._query_id = query_id
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name(self._zone_name), self._rrclass, query_type)
- msg.add_question(query_question)
+ msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+ if query_type == RRType.IXFR():
+ # get the zone finder. this must be SUCCESS (not even
+ # PARTIALMATCH) because we are specifying the zone origin name.
+ zone_soa_rr = self._get_zone_soa()
+ msg.add_rrset(Message.SECTION_AUTHORITY, zone_soa_rr)
+ self._request_serial = get_soa_serial(zone_soa_rr.get_rdata()[0])
+ else:
+ # For AXFR, we temporarily provide backward compatible behavior
+ # where xfrin is responsible for creating zone in the corresponding
+ # DB table. Note that the code below uses the old data source
+ # API and assumes SQLite3 in an ugly manner. We'll have to
+ # develop a better way of managing zones in a generic way and
+ # eliminate the code like the one here.
+ try:
+ self._get_zone_soa()
+ except XfrinException:
+ def empty_rr_generator():
+ return []
+ isc.datasrc.sqlite3_ds.load(self._db_file,
+ self._zone_name.to_text(),
+ empty_rr_generator)
return msg
def _send_data(self, data):
@@ -256,39 +655,49 @@ class XfrinConnection(asyncore.dispatcher):
# now.
return XFRIN_OK
- def do_xfrin(self, check_soa, ixfr_first = False):
- '''Do xfr by sending xfr request and parsing response. '''
+ def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
+ '''Do an xfr session by sending xfr request and parsing responses.'''
try:
ret = XFRIN_OK
+ self._request_type = request_type
+ # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
+            # to hardcode it here.
+ request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
if check_soa:
- logstr = 'SOA check for \'%s\' ' % self._zone_name
ret = self._check_soa_serial()
if ret == XFRIN_OK:
- logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
- self._send_query(RRType.AXFR())
- isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
- self._handle_xfrin_response)
-
- logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
-
- except XfrinException as e:
- logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
+ logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
+ self.zone_str())
+ self._send_query(self._request_type)
+ self.__state = XfrinInitialSOA()
+ self._handle_xfrin_responses()
+ logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
+ self.zone_str())
+
+ except (XfrinException, XfrinProtocolError) as e:
+ logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
+ self.zone_str(), str(e))
ret = XFRIN_FAIL
- #TODO, recover data source.
- except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
- logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
- ret = XFRIN_FAIL
- except UserWarning as e:
- # XXX: this is an exception from our C++ library via the
- # Boost.Python binding. It would be better to have more more
- # specific exceptions, but at this moment this is the finest
- # granularity.
- logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
+ except Exception as e:
+ # Catching all possible exceptions like this is generally not a
+ # good practice, but handling an xfr session could result in
+ # so many types of exceptions, including ones from the DNS library
+ # or from the data source library. Eventually we'd introduce a
+ # hierarchy for exception classes from a base "ISC exception" and
+            # catch it here, but until then we need the broadest coverage so that
+ # we won't miss anything.
+
+ logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+ self.zone_str(), str(e))
ret = XFRIN_FAIL
finally:
- self.close()
+ # Make sure any remaining transaction in the diff is closed
+            # (if it isn't already - possible in case of an xfr-level exception) as soon
+ # as possible
+ self._diff = None
+ self.close()
return ret
@@ -318,9 +727,6 @@ class XfrinConnection(asyncore.dispatcher):
self._check_response_header(msg)
- if msg.get_rr_count(Message.SECTION_ANSWER) == 0:
- raise XfrinException('answer section is empty')
-
if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
raise XfrinException('query section count greater than 1')
@@ -351,14 +757,14 @@ class XfrinConnection(asyncore.dispatcher):
yield (rrset_name, rrset_ttl, rrset_class, rrset_type,
rdata_text)
- def _handle_xfrin_response(self):
- '''Return a generator for the response to a zone transfer. '''
- while True:
+ def _handle_xfrin_responses(self):
+ read_next_msg = True
+ while read_next_msg:
data_len = self._get_request_response(2)
msg_len = socket.htons(struct.unpack('H', data_len)[0])
recvdata = self._get_request_response(msg_len)
msg = Message(Message.PARSE)
- msg.from_wire(recvdata)
+ msg.from_wire(recvdata, Message.PRESERVE_ORDER)
# TSIG related checks, including an unexpected signed response
self._check_response_tsig(msg, recvdata)
@@ -366,12 +772,12 @@ class XfrinConnection(asyncore.dispatcher):
# Perform response status validation
self._check_response_status(msg)
- answer_section = msg.get_section(Message.SECTION_ANSWER)
- for rr in self._handle_answer_section(answer_section):
- yield rr
+ for rr in msg.get_section(Message.SECTION_ANSWER):
+ rr_handled = False
+ while not rr_handled:
+ rr_handled = self.__state.handle_rr(self, rr)
- if self._soa_rr_count == 2:
- break
+ read_next_msg = self.__state.finish_message(self)
if self._shutdown_event.is_set():
raise XfrinException('xfrin is forced to stop')
@@ -393,16 +799,35 @@ class XfrinConnection(asyncore.dispatcher):
pass
def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo, check_soa, verbose,
- tsig_key):
+ shutdown_event, master_addrinfo, check_soa, tsig_key,
+ request_type):
xfrin_recorder.increment(zone_name)
+
+ # Create a data source client used in this XFR session. Right now we
+ # still assume an sqlite3-based data source, and use both the old and new
+ # data source APIs. We also need to use a mock client for tests.
+    # As a temporary workaround to deal with these situations, we skip the
+    # creation when the given file is None (the test case). Eventually
+ # this code will be much cleaner.
+ datasrc_client = None
+ if db_file is not None:
+ # temporary hardcoded sqlite initialization. Once we decide on
+ # the config specification, we need to update this (TODO)
+ # this may depend on #1207, or any followup ticket created for #1207
+ datasrc_type = "sqlite3"
+ datasrc_config = "{ \"database_file\": \"" + db_file + "\"}"
+ datasrc_client = DataSourceClient(datasrc_type, datasrc_config)
+
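Editorial side note (not part of the patch): the hand-escaped string above is plain JSON, so an equivalent and somewhat more readable way to build it would be to serialize a small dict:

    import json
    # produces the same JSON value as the datasrc_config string built above
    datasrc_config = json.dumps({"database_file": db_file})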
+ # Create a TCP connection for the XFR session and perform the operation.
sock_map = {}
- conn = XfrinConnection(sock_map, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo,
- tsig_key, verbose)
+ conn = XfrinConnection(sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addrinfo, tsig_key)
+    # XXX: We still need _db_file for a temporary workaround in _create_query().
+ # This should be removed when we eliminate the need for the workaround.
+ conn._db_file = db_file
ret = XFRIN_FAIL
if conn.connect_to_master():
- ret = conn.do_xfrin(check_soa)
+ ret = conn.do_xfrin(check_soa, request_type)
# Publish the zone transfer result news, so zonemgr can reset the
# zone timer, and xfrout can notify the zone's slaves if the result
@@ -451,7 +876,7 @@ class ZoneInfo:
self.set_master_port(config_data.get('master_port'))
self.set_zone_class(config_data.get('class'))
self.set_tsig_key(config_data.get('tsig_key'))
- self.set_ixfr_disabled(config_data.get('ixfr_disabled'))
+ self.set_use_ixfr(config_data.get('use_ixfr'))
def set_name(self, name_str):
"""Set the name for this zone given a name string.
@@ -526,28 +951,28 @@ class ZoneInfo:
errmsg = "bad TSIG key string: " + tsig_key_str
raise XfrinZoneInfoException(errmsg)
- def set_ixfr_disabled(self, ixfr_disabled):
- """Set ixfr_disabled. If set to False (the default), it will use
- IXFR for incoming transfers. If set to True, it will use AXFR.
+ def set_use_ixfr(self, use_ixfr):
+ """Set use_ixfr. If set to True, it will use
+ IXFR for incoming transfers. If set to False, it will use AXFR.
At this moment there is no automatic fallback"""
- # don't care what type it is; if evaluates to true, set to True
- if ixfr_disabled:
- self.ixfr_disabled = True
+ # TODO: http://bind10.isc.org/ticket/1279
+ if use_ixfr is None:
+ self.use_ixfr = \
+ self._module_cc.get_default_value("zones/use_ixfr")
else:
- self.ixfr_disabled = False
+ self.use_ixfr = use_ixfr
def get_master_addr_info(self):
return (self.master_addr.family, socket.SOCK_STREAM,
(str(self.master_addr), self.master_port))
class Xfrin:
- def __init__(self, verbose = False):
+ def __init__(self):
self._max_transfers_in = 10
self._zones = {}
self._cc_setup()
self.recorder = XfrinRecorder()
self._shutdown_event = threading.Event()
- self._verbose = verbose
def _cc_setup(self):
'''This method is used only as part of initialization, but is
@@ -635,20 +1060,33 @@ class Xfrin:
# we should check if it matches one of them, and then use it.)
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
zone_info = self._get_zone_info(zone_name, rrclass)
+ notify_addr = self._parse_master_and_port(args, zone_name,
+ rrclass)
if zone_info is None:
# TODO what to do? no info known about zone. defaults?
errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
answer = create_answer(1, errmsg)
else:
+ request_type = RRType.AXFR()
+ if zone_info.use_ixfr:
+ request_type = RRType.IXFR()
master_addr = zone_info.get_master_addr_info()
- ret = self.xfrin_start(zone_name,
- rrclass,
- self._get_db_file(),
- master_addr,
- zone_info.tsig_key,
- True)
- answer = create_answer(ret[0], ret[1])
+ if notify_addr == master_addr:
+ ret = self.xfrin_start(zone_name,
+ rrclass,
+ self._get_db_file(),
+ master_addr,
+ zone_info.tsig_key, request_type,
+ True)
+ answer = create_answer(ret[0], ret[1])
+ else:
+ errmsg = "Got notification for " + zone_name.to_text()\
+                        + " from unknown address: " + notify_addr[2][0]
+ logger.error(XFRIN_NOTIFY_UNKNOWN_MASTER,
+ zone_name.to_text(), notify_addr[2][0],
+ master_addr[2][0])
+ answer = create_answer(1, errmsg)
elif command == 'retransfer' or command == 'refresh':
# Xfrin receives the retransfer/refresh from cmdctl(sent by bindctl).
@@ -659,14 +1097,17 @@ class Xfrin:
rrclass)
zone_info = self._get_zone_info(zone_name, rrclass)
tsig_key = None
+ request_type = RRType.AXFR()
if zone_info:
tsig_key = zone_info.tsig_key
+ if zone_info.use_ixfr:
+ request_type = RRType.IXFR()
db_file = args.get('db_file') or self._get_db_file()
ret = self.xfrin_start(zone_name,
rrclass,
db_file,
master_addr,
- tsig_key,
+ tsig_key, request_type,
(False if command == 'retransfer' else True))
answer = create_answer(ret[0], ret[1])
@@ -746,7 +1187,8 @@ class Xfrin:
news(command: zone_new_data_ready) to zone manager and xfrout.
if xfrin failed, just tell the bad news to zone manager, so that
it can reset the refresh timer for that zone. '''
- param = {'zone_name': zone_name, 'zone_class': zone_class.to_text()}
+ param = {'zone_name': zone_name.to_text(),
+ 'zone_class': zone_class.to_text()}
if xfr_result == XFRIN_OK:
msg = create_command(notify_out.ZONE_NEW_DATA_READY_CMD, param)
# catch the exception, in case msgq has been killed.
@@ -783,8 +1225,8 @@ class Xfrin:
while not self._shutdown_event.is_set():
self._cc_check_command()
- def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo, tsig_key,
- check_soa = True):
+ def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+ tsig_key, request_type, check_soa=True):
if "pydnspp" not in sys.modules:
return (1, "xfrin failed, can't load dns message python library: 'pydnspp'")
@@ -798,13 +1240,12 @@ class Xfrin:
xfrin_thread = threading.Thread(target = process_xfrin,
args = (self,
self.recorder,
- zone_name.to_text(),
+ zone_name,
rrclass,
db_file,
self._shutdown_event,
master_addrinfo, check_soa,
- self._verbose,
- tsig_key))
+ tsig_key, request_type))
xfrin_thread.start()
return (0, 'zone xfrin is started')
@@ -823,9 +1264,9 @@ def set_signal_handler():
def set_cmd_options(parser):
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ help="This option is obsolete and has no effect.")
-def main(xfrin_class, use_signal = True):
+def main(xfrin_class, use_signal=True):
"""The main loop of the Xfrin daemon.
@param xfrin_class: A class of the Xfrin object. This is normally Xfrin,
@@ -842,7 +1283,7 @@ def main(xfrin_class, use_signal = True):
if use_signal:
set_signal_handler()
- xfrind = xfrin_class(verbose = options.verbose)
+ xfrind = xfrin_class()
xfrind.startup()
except KeyboardInterrupt:
logger.info(XFRIN_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index bc93720..c1ba61e 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -44,7 +44,7 @@
"item_type": "string",
"item_optional": true
},
- { "item_name": "ixfr_disabled",
+ { "item_name": "use_ixfr",
"item_type": "boolean",
"item_optional": false,
"item_default": false
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 80a0be3..e5d1733 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,25 +15,26 @@
# No namespace declaration - these constants go in the global namespace
# of the xfrin messages python module.
-% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
-The error is shown in the log message.
+% XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or a failure
+in the database connection. The error is shown in the log message.
% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
-% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a protocol error.
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a protocol error.
The error is shown in the log message.
-% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+% XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started
A connection to the master server has been made, the serial value in
the SOA record has been checked, and a zone transfer has been started.
-% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
-The AXFR transfer of the given zone was successfully completed.
+% XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded
+The XFR transfer of the given zone was successfully completed.
% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
The given master address is not a valid IP address.
@@ -69,6 +70,12 @@ was killed.
There was a problem sending a message to the zone manager. This most
likely means that the msgq daemon has quit or was killed.
+% XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+
% XFRIN_IMPORT_DNS error importing python DNS module: %1
There was an error importing the python DNS module pydnspp. The most
likely cause is a PYTHONPATH problem.
@@ -89,3 +96,32 @@ daemon will now shut down.
% XFRIN_UNKNOWN_ERROR unknown error: %1
An uncaught exception was raised while running the xfrin daemon. The
exception message is printed in the log message.
+
+% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
+In an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+
+% XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1
+Non incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA. Non incremental transfer is
+either AXFR or AXFR-style IXFR. In the latter case, it means that
+in a response to an IXFR query the first data is not an SOA or its SOA serial
+is not equal to the requested SOA serial.
+
+% XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same. According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold. There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems to be that it would be better to reject the
+transfer if a mismatch is detected. On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs. For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found. If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of
+zone), we can consider a stricter action.
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index 8a4c7c1..1020ffe 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -8,3 +8,4 @@ unreadVariable:src/lib/dns/rdata/template.cc:61
selfAssignment:src/lib/dns/tests/name_unittest.cc:293
selfAssignment:src/lib/dns/tests/rdata_unittest.cc:228
selfAssignment:src/lib/dns/tests/tsigkey_unittest.cc:137
+selfAssignment:src/lib/dns/tests/rdata_txt_like_unittest.cc:222
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index c825e66..a569ea7 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = exceptions util log cryptolink dns cc config python xfr \
- bench asiolink asiodns nsas cache resolve testutils datasrc \
- acl server_common dhcp
+SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
+ asiolink asiodns nsas cache resolve testutils datasrc \
+ server_common python dhcp
diff --git a/src/lib/bench/Makefile.am b/src/lib/bench/Makefile.am
index 866404f..514b3b3 100644
--- a/src/lib/bench/Makefile.am
+++ b/src/lib/bench/Makefile.am
@@ -6,6 +6,6 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda
-lib_LTLIBRARIES = libbench.la
+noinst_LTLIBRARIES = libbench.la
libbench_la_SOURCES = benchmark_util.h benchmark_util.cc
EXTRA_DIST = benchmark.h
diff --git a/src/lib/bench/tests/run_unittests.cc b/src/lib/bench/tests/run_unittests.cc
index 85d4548..450f5dc 100644
--- a/src/lib/bench/tests/run_unittests.cc
+++ b/src/lib/bench/tests/run_unittests.cc
@@ -13,10 +13,11 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/config/tests/Makefile.am b/src/lib/config/tests/Makefile.am
index 7153e09..2f1fc6f 100644
--- a/src/lib/config/tests/Makefile.am
+++ b/src/lib/config/tests/Makefile.am
@@ -11,7 +11,7 @@ endif
CLEANFILES = *.gcno *.gcda
-lib_LTLIBRARIES = libfake_session.la
+noinst_LTLIBRARIES = libfake_session.la
libfake_session_la_SOURCES = fake_session.h fake_session.cc
TESTS =
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 6b71388..bf1171e 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -9,7 +9,7 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
-lib_LTLIBRARIES = libdatasrc.la
+lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -17,15 +17,25 @@ libdatasrc_la_SOURCES += query.h query.cc
libdatasrc_la_SOURCES += cache.h cache.cc
libdatasrc_la_SOURCES += rbtree.h
libdatasrc_la_SOURCES += zonetable.h zonetable.cc
-libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
libdatasrc_la_SOURCES += client.h iterator.h
libdatasrc_la_SOURCES += database.h database.cc
-libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
+libdatasrc_la_SOURCES += factory.h factory.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
+sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
+sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+sqlite3_ds_la_LIBADD += libdatasrc.la
+sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
+
+memory_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc
+memory_ds_la_LDFLAGS = -module
+memory_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+memory_ds_la_LIBADD += libdatasrc.la
+
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 7679183..35c6f77 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -23,6 +23,47 @@
#include <dns/name.h>
#include <datasrc/zone.h>
+/// \file
+/// Datasource clients
+///
+/// The data source client API is specified in client.h, and provides the
+/// functionality to query and modify data in the data sources. There are
+/// multiple datasource implementations, and by subclassing DataSourceClient or
+/// DatabaseClient, more can be added.
+///
+/// All datasources are implemented as loadable modules, with a name of the
+/// form "<type>_ds.so". This has been chosen intentionally, to minimize
+/// confusion and potential mistakes.
+///
+/// In order to use a datasource client backend, the class
+/// DataSourceClientContainer is provided in factory.h; this will load the
+/// library, set up the instance, and clean everything up once it is destroyed.
+///
+/// Access to the actual instance is provided with the getInstance() method
+/// in DataSourceClientContainer.
+///
+/// \note Depending on actual usage, we might consider making the container
+/// a transparent abstraction layer, so it can be used as a DataSourceClient
+/// directly. This has some other implications, though, so for now the only
+/// access provided is through getInstance().
+///
+/// For datasource backends, we use a dynamically loaded library system (with
+/// dlopen()). This library must contain the following things:
+/// - A subclass of DataSourceClient or DatabaseClient (which itself is a
+/// subclass of DataSourceClient)
+/// - A creator function for an instance of that subclass, of the form:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+/// \endcode
+/// - A destructor for said instance, of the form:
+/// \code
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+///
+/// See the documentation for the \link DataSourceClient \endlink class for
+/// more information on implementing subclasses of it.
+///
+
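For orientation, the Python binding used by b10-xfrin earlier in this diff wraps this same client API; a minimal, hedged sketch of typical use from Python (the database path and zone name are made up):

    from isc.datasrc import DataSourceClient
    from pydnspp import Name

    client = DataSourceClient("sqlite3",
                              '{"database_file": "/tmp/example.sqlite3"}')
    result, finder = client.find_zone(Name("example.com"))
    if result == DataSourceClient.SUCCESS:
        pass  # finder is a ZoneFinder; use finder.find(...) as in _get_zone_soa()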
namespace isc {
namespace datasrc {
@@ -40,6 +81,9 @@ typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
/// operations to other classes; in general methods of this class act as
/// factories of these other classes.
///
+/// See \link datasrc/client.h datasrc/client.h \endlink for more information
+/// on adding datasource implementations.
+///
/// The following derived classes are currently (expected to be) provided:
/// - \c InMemoryClient: A client of a conceptual data source that stores
/// all necessary data in memory for faster lookups
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index 9422524..c14881a 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -188,9 +188,9 @@ public:
void setClass(isc::dns::RRClass& c) { rrclass = c; }
void setClass(const isc::dns::RRClass& c) { rrclass = c; }
- Result init() { return (NOT_IMPLEMENTED); }
- Result init(isc::data::ConstElementPtr config);
- Result close() { return (NOT_IMPLEMENTED); }
+ virtual Result init() { return (NOT_IMPLEMENTED); }
+ virtual Result init(isc::data::ConstElementPtr config);
+ virtual Result close() { return (NOT_IMPLEMENTED); }
virtual Result findRRset(const isc::dns::Name& qname,
const isc::dns::RRClass& qclass,
@@ -355,7 +355,7 @@ public:
/// \brief Returns the best enclosing zone name found for the given
// name and RR class so far.
- ///
+ ///
/// \return A pointer to the zone apex \c Name, NULL if none found yet.
///
/// This method never throws an exception.
@@ -419,6 +419,6 @@ private:
#endif
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
new file mode 100644
index 0000000..df573b9
--- /dev/null
+++ b/src/lib/datasrc/factory.cc
@@ -0,0 +1,127 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "factory.h"
+
+#include "data_source.h"
+#include "database.h"
+#include "sqlite3_accessor.h"
+#include "memory_datasrc.h"
+
+#include <datasrc/logger.h>
+
+#ifndef _WIN32
+#include <dlfcn.h>
+#endif
+
+using namespace isc::data;
+using namespace isc::datasrc;
+
+namespace isc {
+namespace datasrc {
+
+LibraryContainer::LibraryContainer(const std::string& name) {
+#ifdef _WIN32
+ if (strcmp(name.c_str(), "sqlite3") == 0)
+ ds_lib_ = 1;
+ else if (strcmp(name.c_str(), "memory") == 0)
+ ds_lib_ = 2;
+ else {
+ isc_throw(DataSourceLibraryError,
+ "only \"sqlite3\" and \"memory\" are supported");
+ }
+#else
+ // use RTLD_GLOBAL so that shared symbols (e.g. exceptions)
+ // are recognized as such
+ ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
+ if (ds_lib_ == NULL) {
+ isc_throw(DataSourceLibraryError, dlerror());
+ }
+#endif
+}
+
+LibraryContainer::~LibraryContainer() {
+#ifndef _WIN32
+ dlclose(ds_lib_);
+#endif
+}
+
+void*
+LibraryContainer::getSym(const char* name) {
+#ifdef _WIN32
+ if (strcmp(name, "createInstance") == 0) {
+ if (ds_lib_ == 1)
+ return SQLCreateInstance;
+ else
+ return MemoryCreateInstance;
+ } else if (strcmp(name, "destroyInstance") == 0) {
+ if (ds_lib_ == 1)
+ return SQLDestroyInstance;
+ else
+ return MemoryDestroyInstance;
+ } else {
+ isc_throw(DataSourceLibrarySymbolError,
+ "not \"createInstance\" or \"destroyInstance\"");
+ }
+#else
+ // Since dlsym can return NULL on success, we check for errors by
+ // first clearing any existing errors with dlerror(), then calling dlsym,
+ // and finally checking for errors with dlerror()
+ dlerror();
+
+ void *sym = dlsym(ds_lib_, name);
+
+ const char* dlsym_error = dlerror();
+ if (dlsym_error != NULL) {
+ isc_throw(DataSourceLibrarySymbolError, dlsym_error);
+ }
+
+ return (sym);
+#endif
+}
+
+DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
+ ConstElementPtr config)
+: ds_lib_(type + "_ds.so")
+{
+    // We are casting from a data pointer to a function pointer here.
+    // Some compilers (rightfully) complain about that, but
+    // C-style casts are the most widely accepted here. If we run
+ // into any that also don't like this, we might need to
+ // use some form of union cast or memory copy to get
+ // from the void* to the function pointer.
+ ds_creator* ds_create = (ds_creator*)ds_lib_.getSym("createInstance");
+ destructor_ = (ds_destructor*)ds_lib_.getSym("destroyInstance");
+
+ std::string error;
+ try {
+ instance_ = ds_create(config, error);
+ if (instance_ == NULL) {
+ isc_throw(DataSourceError, error);
+ }
+ } catch (const std::exception& exc) {
+ isc_throw(DataSourceError, "Unknown uncaught exception from " + type +
+ " createInstance: " + exc.what());
+ } catch (...) {
+ isc_throw(DataSourceError, "Unknown uncaught exception from " + type);
+ }
+}
+
+DataSourceClientContainer::~DataSourceClientContainer() {
+ destructor_(instance_);
+}
+
+} // end namespace datasrc
+} // end namespace isc
+
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
new file mode 100644
index 0000000..eedcea2
--- /dev/null
+++ b/src/lib/datasrc/factory.h
@@ -0,0 +1,174 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_FACTORY_H
+#define __DATA_SOURCE_FACTORY_H 1
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
+#include <exceptions/exceptions.h>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace datasrc {
+
+
+/// \brief Raised if there is an error loading the datasource implementation
+/// library
+class DataSourceLibraryError : public DataSourceError {
+public:
+ DataSourceLibraryError(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if there is an error reading a symbol from the datasource
+/// implementation library
+class DataSourceLibrarySymbolError : public DataSourceError {
+public:
+ DataSourceLibrarySymbolError(const char* file, size_t line,
+ const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+typedef DataSourceClient* ds_creator(isc::data::ConstElementPtr config,
+ std::string& error);
+typedef void ds_destructor(DataSourceClient* instance);
+
+/// \brief Container class for dynamically loaded libraries
+///
+/// This class is used to dlopen() a library, provides access to dlsym(),
+/// and cleans up the dlopened library when the instance of this class is
+/// destroyed.
+///
+/// Its main function is to provide RAII-style access to dlopen'ed libraries.
+///
+/// \note Currently it is Datasource-backend specific. If we have need for this
+/// in other places than for dynamically loading datasources, then, apart
+/// from moving it to another location, we also need to make the
+/// exceptions raised more general.
+class LibraryContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \param name The name of the library (.so) file. This file must be in
+ /// the library path.
+ ///
+ /// \exception DataSourceLibraryError If the library cannot be found or
+ /// cannot be loaded.
+ LibraryContainer(const std::string& name);
+
+ /// \brief Destructor
+ ///
+ /// Cleans up the library by calling dlclose()
+ ~LibraryContainer();
+
+ /// \brief Retrieve a symbol
+ ///
+ /// This retrieves a symbol from the loaded library.
+ ///
+ /// \exception DataSourceLibrarySymbolError if the symbol cannot be found,
+    /// or if another error (as reported by dlerror()) occurs.
+ ///
+ /// \param name The name of the symbol to retrieve
+ /// \return A pointer to the symbol. This may be NULL, and if so, indicates
+ /// the symbol does indeed exist, but has the value NULL itself.
+ /// If the symbol does not exist, a DataSourceLibrarySymbolError is
+ /// raised.
+ ///
+ /// \note The argument is a const char* (and not a std::string like the
+ /// argument in the constructor). This argument is always a fixed
+ /// string in the code, while the other can be read from
+    ///       configuration, and needs modification.
+ void* getSym(const char* name);
+private:
+ /// Pointer to the dynamically loaded library structure
+#ifdef _WIN32
+ int ds_lib_;
+#else
+ void *ds_lib_;
+#endif
+};
+
+
+/// \brief Container for a specific instance of a dynamically loaded
+/// DataSourceClient implementation
+///
+/// Given a datasource type and a type-specific set of configuration data,
+/// the corresponding dynamic library is loaded (if it hadn't been already),
+/// and an instance is created. This instance is stored within this structure,
+/// and can be accessed through getInstance(). Upon destruction of this
+/// container, the stored instance of the DataSourceClient is deleted with
+/// the destructor function provided by the loaded library.
+///
+/// The 'type' is actually the name of the library, minus the '_ds.so' postfix
+/// Datasource implementation libraries therefore have a fixed name, both for
+/// easy recognition and to reduce potential mistakes.
+/// For example, the sqlite3 implementation has the type 'sqlite3', and the
+/// derived filename 'sqlite3_ds.so'
+///
+/// There are of course some requirements for an implementation, not all of
+/// which can be verified at compile time. It must provide creator and
+/// destructor functions. The creator function must return an instance of a
+/// subclass of
+/// DataSourceClient. The prototypes of these functions are as follows:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+///
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+class DataSourceClientContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \exception DataSourceLibraryError if there is an error loading the
+ /// backend library
+ /// \exception DataSourceLibrarySymbolError if the library does not have
+ /// the needed symbols, or if there is an error reading them
+ /// \exception DataError if the given config is not correct
+ /// for the given type, or if there was a problem during
+ /// initialization
+ ///
+ /// \param type The type of the datasource client. Based on the value of
+ /// type, a specific backend library is used, by appending the
+ /// string '_ds.so' to the given type, and loading that as the
+ /// implementation library
+ /// \param config Type-specific configuration data, see the documentation
+ /// of the datasource backend type for information on what
+ /// configuration data to pass.
+ DataSourceClientContainer(const std::string& type,
+ isc::data::ConstElementPtr config);
+
+ /// \brief Destructor
+ ~DataSourceClientContainer();
+
+ /// \brief Accessor to the instance
+ ///
+ /// \return Reference to the DataSourceClient instance contained in this
+ /// container.
+ DataSourceClient& getInstance() { return *instance_; }
+
+private:
+ DataSourceClient* instance_;
+ ds_destructor* destructor_;
+ LibraryContainer ds_lib_;
+};
+
+} // end namespace datasrc
+} // end namespace isc
+#endif // DATA_SOURCE_FACTORY_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 98f331e..316db34 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -16,6 +16,7 @@
#include <cassert>
#include <boost/shared_ptr.hpp>
#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
#include <exceptions/exceptions.h>
@@ -29,9 +30,13 @@
#include <datasrc/logger.h>
#include <datasrc/iterator.h>
#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+
+#include <cc/data.h>
using namespace std;
using namespace isc::dns;
+using namespace isc::data;
namespace isc {
namespace datasrc {
@@ -809,5 +814,159 @@ ZoneUpdaterPtr
InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
}
+
+
+namespace {
+// convenience function to add an error message to a list of those
+// (TODO: move functions like these to some util lib?)
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+/// Check if the given element exists in the map, and if it is a string
+bool
+checkConfigElementString(ConstElementPtr config, const std::string& name,
+ ElementPtr errors)
+{
+ if (!config->contains(name)) {
+ addError(errors,
+ "Config for memory backend does not contain a '"
+ +name+
+ "' value");
+ return false;
+ } else if (!config->get(name) ||
+ config->get(name)->getType() != Element::string) {
+ addError(errors, "value of " + name +
+ " in memory backend config is not a string");
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool
+checkZoneConfig(ConstElementPtr config, ElementPtr errors) {
+ bool result = true;
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Elements in memory backend's zone list must be maps");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "origin", errors)) {
+ result = false;
+ }
+ if (!checkConfigElementString(config, "file", errors)) {
+ result = false;
+ }
+        // we could add some existence/readability/parsability checks here
+ // if we want
+ }
+ return result;
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+    /* Specific configuration is under discussion; right now this accepts
+ * the 'old' configuration, see [TODO]
+ * So for memory datasource, we get a structure like this:
+ * { "type": string ("memory"),
+ * "class": string ("IN"/"CH"/etc),
+ * "zones": list
+ * }
+ * Zones list is a list of maps:
+ * { "origin": string,
+ * "file": string
+ * }
+ *
+ * At this moment we cannot be completely sure of the contents of the
+     * structure, so we have to do more extensive checks than would
+ * strictly be necessary (e.g. existence and type of elements)
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for memory backend must be a map");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "type", errors)) {
+ result = false;
+ } else {
+ if (config->get("type")->stringValue() != "memory") {
+ addError(errors,
+ "Config for memory backend is not of type \"memory\"");
+ result = false;
+ }
+ }
+ if (!checkConfigElementString(config, "class", errors)) {
+ result = false;
+ } else {
+ try {
+ RRClass rrc(config->get("class")->stringValue());
+ } catch (const isc::Exception& rrce) {
+ addError(errors,
+ "Error parsing class config for memory backend: " +
+ std::string(rrce.what()));
+ result = false;
+ }
+ }
+ if (!config->contains("zones")) {
+ addError(errors, "No 'zones' element in memory backend config");
+ result = false;
+ } else if (!config->get("zones") ||
+ config->get("zones")->getType() != Element::list) {
+ addError(errors, "'zones' element in memory backend config is not a list");
+ result = false;
+ } else {
+ BOOST_FOREACH(ConstElementPtr zone_config,
+ config->get("zones")->listValue()) {
+ if (!checkZoneConfig(zone_config, errors)) {
+ result = false;
+ }
+ }
+ }
+ }
+
+ return (result);
+}
+
+} // end anonymous namespace
+
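For concreteness, an editorial example (not part of the patch) of a configuration that would pass the checks above, written as a Python literal with made-up zone data; serializing it with json.dumps() gives the JSON form of the Element that createInstance() expects:

    memory_config = {
        "type": "memory",
        "class": "IN",
        "zones": [
            {"origin": "example.com",
             "file": "/var/bind10/example.com.zone"}
        ]
    }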
+DataSourceClient *
+#ifdef _WIN32
+MemoryCreateInstance
+#else
+createInstance
+#endif
+(isc::data::ConstElementPtr config, std::string& error) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ error = "Configuration error: " + errors->str();
+ return (NULL);
+ }
+ try {
+ return (new InMemoryClient());
+ } catch (const std::exception& exc) {
+ error = std::string("Error creating memory datasource: ") + exc.what();
+ return (NULL);
+ } catch (...) {
+ error = std::string("Error creating memory datasource, "
+ "unknown exception");
+ return (NULL);
+ }
+}
+
+void
+#ifdef _WIN32
+MemoryDestroyInstance
+#else
+destroyInstance
+#endif
+(DataSourceClient* instance) {
+ delete instance;
+}
+
+
} // end of namespace datasrc
-} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 95f589a..6b38471 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -22,6 +22,8 @@
#include <datasrc/zonetable.h>
#include <datasrc/client.h>
+#include <cc/data.h>
+
namespace isc {
namespace dns {
class Name;
@@ -219,7 +221,7 @@ private:
/// while it wouldn't be safe to delete unnecessary zones inside the dedicated
/// backend.
///
-/// The findZone() method takes a domain name and returns the best matching
+/// The findZone() method takes a domain name and returns the best matching
/// \c InMemoryZoneFinder in the form of (Boost) shared pointer, so that it can
/// provide the general interface for all data sources.
class InMemoryClient : public DataSourceClient {
@@ -289,6 +291,45 @@ private:
class InMemoryClientImpl;
InMemoryClientImpl* impl_;
};
+
+/// \brief Creates an instance of the Memory datasource client
+///
+/// Currently the configuration passed here must be a MapElement, formed as
+/// follows:
+/// \code
+/// { "type": string ("memory"),
+/// "class": string ("IN"/"CH"/etc),
+/// "zones": list
+/// }
+/// Zones list is a list of maps:
+/// { "origin": string,
+/// "file": string
+/// }
+/// \endcode
+/// (i.e. the configuration that was used prior to the datasource refactor)
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+///
+/// \param config The configuration for the datasource instance
+/// \param error This string will be set to an error message if an error occurs
+/// during initialization
+/// \return An instance of the memory datasource client, or NULL if there was
+/// an error
+#ifdef _WIN32
+extern "C" DataSourceClient* MemoryCreateInstance(isc::data::ConstElementPtr config,
+ std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void MemoryDestroyInstance(DataSourceClient* instance);
+#else
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config,
+ std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+#endif
+
}
}
#endif // __DATA_SOURCE_MEMORY_H
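For illustration (not part of the patch): the configuration documented above, written as JSON and handed to the generic factory that factory_unittest.cc below exercises. Element::fromJSON and the origin/file values are assumptions made for this sketch; as the TODOs note, the client created by this patch does not actually load the listed zones yet.

    #include <cc/data.h>
    #include <datasrc/client.h>
    #include <datasrc/factory.h>
    #include <dns/name.h>

    void memory_factory_sketch() {
        using namespace isc::data;
        using namespace isc::datasrc;

        const ConstElementPtr config(Element::fromJSON(
            "{ \"type\": \"memory\", \"class\": \"IN\","
            "  \"zones\": [ { \"origin\": \"example.org\","
            "                 \"file\": \"/tmp/example.org.zone\" } ] }"));

        DataSourceClientContainer container("memory", config);
        // Returns NOTFOUND until zone loading is implemented.
        container.getInstance().findZone(isc::dns::Name("example.org"));
    }
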
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index d536aa1..1120dc6 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -24,12 +24,16 @@
#include <datasrc/sqlite3_accessor.h>
#include <datasrc/logger.h>
#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
#include <util/filename.h>
using namespace std;
+using namespace isc::data;
#define SQLITE_SCHEMA_VERSION 1
+#define CONFIG_ITEM_DATABASE_FILE "database_file"
+
namespace isc {
namespace datasrc {
@@ -138,19 +142,6 @@ private:
};
SQLite3Accessor::SQLite3Accessor(const std::string& filename,
- const isc::dns::RRClass& rrclass) :
- dbparameters_(new SQLite3Parameters),
- filename_(filename),
- class_(rrclass.toText()),
- database_name_("sqlite3_" +
- isc::util::Filename(filename).nameAndExtension())
-{
- LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
-
- open(filename);
-}
-
-SQLite3Accessor::SQLite3Accessor(const std::string& filename,
const string& rrclass) :
dbparameters_(new SQLite3Parameters),
filename_(filename),
@@ -645,8 +636,12 @@ doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
const size_t column_count =
sizeof(update_params) / sizeof(update_params[0]);
for (int i = 0; i < column_count; ++i) {
- if (sqlite3_bind_text(stmt, ++param_id, update_params[i].c_str(), -1,
- SQLITE_TRANSIENT) != SQLITE_OK) {
+ // The old sqlite3 data source API assumes NULL for an empty column.
+ // We need to provide compatibility at least for now.
+ if (sqlite3_bind_text(stmt, ++param_id,
+ update_params[i].empty() ? NULL :
+ update_params[i].c_str(),
+ -1, SQLITE_TRANSIENT) != SQLITE_OK) {
isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
sqlite3_errmsg(dbparams.db_));
}
@@ -719,5 +714,86 @@ SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
return (result);
}
+namespace {
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* The specific configuration format is still under discussion; right now
+ * this accepts the 'old' configuration, see the header file.
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for SQlite3 backend must be a map");
+ result = false;
+ } else {
+ if (!config->contains(CONFIG_ITEM_DATABASE_FILE)) {
+ addError(errors,
+ "Config for SQlite3 backend does not contain a '"
+ CONFIG_ITEM_DATABASE_FILE
+ "' value");
+ result = false;
+ } else if (!config->get(CONFIG_ITEM_DATABASE_FILE) ||
+ config->get(CONFIG_ITEM_DATABASE_FILE)->getType() !=
+ Element::string) {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is not a string");
+ result = false;
+ } else if (config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue() ==
+ "") {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is empty");
+ result = false;
+ }
+ }
+
+ return (result);
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+#ifdef _WIN32
+SQLCreateInstance
+#else
+createInstance
+#endif
+(isc::data::ConstElementPtr config, std::string& error) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ error = "Configuration error: " + errors->str();
+ return (NULL);
+ }
+ std::string dbfile = config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
+ try {
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(dbfile, "IN")); // XXX: avoid hardcoding the RR class
+ return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
+ } catch (const std::exception& exc) {
+ error = std::string("Error creating sqlite3 datasource: ") + exc.what();
+ return (NULL);
+ } catch (...) {
+ error = std::string("Error creating sqlite3 datasource, "
+ "unknown exception");
+ return (NULL);
+ }
}
+
+void
+#ifdef _WIN32
+SQLDestroyInstance
+#else
+destroyInstance
+#endif
+(DataSourceClient* instance) {
+ delete instance;
}
+
+} // end of namespace datasrc
+} // end of namespace isc
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index c4bacad..2d2beea 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -24,6 +24,8 @@
#include <boost/scoped_ptr.hpp>
#include <string>
+#include <cc/data.h>
+
namespace isc {
namespace dns {
class RRClass;
@@ -65,20 +67,10 @@ public:
* doesn't work (it is broken, doesn't exist and can't be created, etc).
*
* \param filename The database file to be used.
- * \param rrclass Which class of data it should serve (while the database
- * file can contain multiple classes of data, single database can
- * provide only one class).
- */
- SQLite3Accessor(const std::string& filename,
- const isc::dns::RRClass& rrclass);
-
- /**
- * \brief Constructor
- *
- * Same as the other version, but takes rrclass as a bare string.
- * we should obsolete the other version and unify the constructor to
- * this version; the SQLite3Accessor is expected to be "dumb" and
- * shouldn't care about DNS specific information such as RRClass.
+ * \param rrclass Textual representation of RR class ("IN", "CH", etc),
+ * specifying which class of data it should serve (while the database
+ * file can contain multiple classes of data, a single accessor can
+ * work with only one class).
*/
SQLite3Accessor(const std::string& filename, const std::string& rrclass);
@@ -191,6 +183,33 @@ private:
const std::string database_name_;
};
+/// \brief Creates an instance of the SQLite3 datasource client
+///
+/// Currently the configuration passed here must be a MapElement, containing
+/// one item called "database_file", whose value is a string
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+///
+/// \param config The configuration for the datasource instance
+/// \param error This string will be set to an error message if an error occurs
+/// during initialization
+/// \return An instance of the sqlite3 datasource client, or NULL if there was
+/// an error
+#ifdef _WIN32
+extern "C" DataSourceClient* SQLCreateInstance(isc::data::ConstElementPtr config,
+ std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void SQLDestroyInstance(DataSourceClient* instance);
+#else
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config,
+ std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+#endif
+
}
}
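For illustration (not part of the patch): a sketch of the documented one-item map, driven through the generic factory; the database path and zone name are made-up values.

    #include <cc/data.h>
    #include <datasrc/client.h>
    #include <datasrc/factory.h>
    #include <dns/name.h>

    void sqlite3_factory_sketch() {
        using namespace isc::data;
        using namespace isc::datasrc;

        ElementPtr config(Element::createMap());
        config->set("database_file",
                    Element::create("/var/lib/bind10/zones.sqlite3"));

        DataSourceClientContainer container("sqlite3", config);
        DataSourceClient::FindResult found(
            container.getInstance().findZone(isc::dns::Name("example.org")));
        // found.code is result::SUCCESS if that database holds the zone.
    }
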
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 48cbc76..3d2ba6d 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = . testdata
+SUBDIRS = testdata
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/lib/dns
@@ -29,12 +29,23 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += cache_unittest.cc
run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
run_unittests_SOURCES += rbtree_unittest.cc
-run_unittests_SOURCES += zonetable_unittest.cc
-run_unittests_SOURCES += memory_datasrc_unittest.cc
+#run_unittests_SOURCES += zonetable_unittest.cc
+#run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
run_unittests_SOURCES += database_unittest.cc
run_unittests_SOURCES += client_unittest.cc
run_unittests_SOURCES += sqlite3_accessor_unittest.cc
+if !USE_STATIC_LINK
+# This test uses a dynamically loadable module. It will cause various
+# troubles with static linking, such as "missing" symbols in the static object
+# for the module. As a workaround we disable this particular test
+# in this case.
+run_unittests_SOURCES += factory_unittest.cc
+endif
+# for the dlopened types we have tests for, we also need to include the
+# sources
+run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
+#run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 633090c..774a52e 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -834,8 +834,7 @@ public:
class TestSQLite3Accessor : public SQLite3Accessor {
public:
TestSQLite3Accessor() : SQLite3Accessor(
- TEST_DATA_BUILDDIR "/rwtest.sqlite3.copied",
- RRClass::IN())
+ TEST_DATA_BUILDDIR "/rwtest.sqlite3.copied", "IN")
{
startUpdateZone("example.org.", true);
string columns[ADD_COLUMN_COUNT];
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
new file mode 100644
index 0000000..0133508
--- /dev/null
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/scoped_ptr.hpp>
+
+#include <datasrc/factory.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <dns/rrclass.h>
+#include <cc/data.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using namespace isc::data;
+
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+namespace {
+
+TEST(FactoryTest, sqlite3ClientBadConfig) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operations on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", Element::create("/foo/bar/doesnotexist"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", Element::create(SQLITE_DBFILE_EXAMPLE_ORG));
+ DataSourceClientContainer dsc("sqlite3", config);
+
+ DataSourceClient::FindResult result1(
+ dsc.getInstance().findZone(isc::dns::Name("example.org.")));
+ ASSERT_EQ(result::SUCCESS, result1.code);
+
+ DataSourceClient::FindResult result2(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result2.code);
+
+ ZoneIteratorPtr iterator(dsc.getInstance().getIterator(
+ isc::dns::Name("example.org.")));
+
+ ZoneUpdaterPtr updater(dsc.getInstance().getUpdater(
+ isc::dns::Name("example.org."), false));
+}
+
+TEST(FactoryTest, memoryClient) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operations on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer client("memory", config),
+ DataSourceError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", Element::create("memory"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("zones", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("zones", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("zones", Element::createList());
+ DataSourceClientContainer dsc("memory", config);
+
+ // Once it is able to load some zones, we should add a few tests
+ // here to see that it does.
+ DataSourceClient::FindResult result(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result.code);
+
+ ASSERT_THROW(dsc.getInstance().getIterator(isc::dns::Name("example.org.")),
+ DataSourceError);
+
+ ASSERT_THROW(dsc.getInstance().getUpdater(isc::dns::Name("no.such.zone."),
+ false), isc::NotImplemented);
+}
+
+TEST(FactoryTest, badType) {
+ ASSERT_THROW(DataSourceClientContainer("foo", ElementPtr()),
+ DataSourceError);
+}
+
+} // end anonymous namespace
+
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 9013864..11588f7 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -56,36 +56,34 @@ const char* SQLITE_NEW_DBFILE = TEST_DATA_BUILDDIR "/newdb.sqlite3";
// Opening works (the content is tested in different tests)
TEST(SQLite3Open, common) {
- EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE,
- RRClass::IN()));
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE, "IN"));
}
// The file can't be opened
TEST(SQLite3Open, notExist) {
- EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_NOTEXIST,
- RRClass::IN()), SQLite3Error);
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_NOTEXIST, "IN"),
+ SQLite3Error);
}
// It rejects broken DB
TEST(SQLite3Open, brokenDB) {
- EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_BROKENDB,
- RRClass::IN()), SQLite3Error);
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_BROKENDB, "IN"),
+ SQLite3Error);
}
// Test we can create the schema on the fly
TEST(SQLite3Open, memoryDB) {
- EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY,
- RRClass::IN()));
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY, "IN"));
}
// Test fixture for querying the db
class SQLite3AccessorTest : public ::testing::Test {
public:
SQLite3AccessorTest() {
- initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::IN());
+ initAccessor(SQLITE_DBFILE_EXAMPLE, "IN");
}
// So it can be re-created with different data
- void initAccessor(const std::string& filename, const RRClass& rrclass) {
+ void initAccessor(const std::string& filename, const string& rrclass) {
accessor.reset(new SQLite3Accessor(filename, rrclass));
}
// The tested accessor
@@ -111,14 +109,14 @@ TEST_F(SQLite3AccessorTest, noZone) {
// This zone is there, but in different class
TEST_F(SQLite3AccessorTest, noClass) {
- initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::CH());
+ initAccessor(SQLITE_DBFILE_EXAMPLE, "CH");
EXPECT_FALSE(accessor->getZone("example.com.").first);
}
// This tests the iterator context
TEST_F(SQLite3AccessorTest, iterator) {
// Our test zone is conveniently small, but not empty
- initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, RRClass::IN());
+ initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, "IN");
const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
ASSERT_TRUE(zone_info.first);
@@ -206,12 +204,12 @@ TEST_F(SQLite3AccessorTest, iterator) {
}
TEST(SQLite3Open, getDBNameExample2) {
- SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE2, RRClass::IN());
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE2, "IN");
EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, accessor.getDBName());
}
TEST(SQLite3Open, getDBNameExampleROOT) {
- SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE_ROOT, RRClass::IN());
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE_ROOT, "IN");
EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
}
@@ -409,7 +407,7 @@ bool isReadable(const char* filename) {
TEST_F(SQLite3Create, creationtest) {
ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
// Should simply be created
- SQLite3Accessor accessor(SQLITE_NEW_DBFILE, RRClass::IN());
+ SQLite3Accessor accessor(SQLITE_NEW_DBFILE, "IN");
ASSERT_TRUE(isReadable(SQLITE_NEW_DBFILE));
}
@@ -421,12 +419,12 @@ TEST_F(SQLite3Create, emptytest) {
ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
// empty, but not locked, so creating it now should work
- SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, RRClass::IN());
+ SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, "IN");
sqlite3_close(db);
// should work now that we closed it
- SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, RRClass::IN());
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, "IN");
}
TEST_F(SQLite3Create, lockedtest) {
@@ -438,13 +436,13 @@ TEST_F(SQLite3Create, lockedtest) {
sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL, NULL);
// should not be able to open it
- EXPECT_THROW(SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, RRClass::IN()),
+ EXPECT_THROW(SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, "IN"),
SQLite3Error);
sqlite3_exec(db, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
// should work now that we closed it
- SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, RRClass::IN());
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, "IN");
}
TEST_F(SQLite3AccessorTest, clone) {
@@ -511,11 +509,11 @@ protected:
isc_throw(isc::Exception,
"Error setting up; command failed: " << install_cmd);
};
- initAccessor(TEST_DATA_BUILDDIR "/test.sqlite3.copied", RRClass::IN());
+ initAccessor(TEST_DATA_BUILDDIR "/test.sqlite3.copied", "IN");
zone_id = accessor->getZone("example.com.").second;
another_accessor.reset(new SQLite3Accessor(
TEST_DATA_BUILDDIR "/test.sqlite3.copied",
- RRClass::IN()));
+ "IN"));
expected_stored.push_back(common_expected_data);
}
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 3d4a663..0d2bffd 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -24,6 +24,9 @@ EXTRA_DIST += rdata/generic/cname_5.h
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.cc
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.h
EXTRA_DIST += rdata/generic/detail/txt_like.h
+EXTRA_DIST += rdata/generic/detail/ds_like.h
+EXTRA_DIST += rdata/generic/dlv_32769.cc
+EXTRA_DIST += rdata/generic/dlv_32769.h
EXTRA_DIST += rdata/generic/dname_39.cc
EXTRA_DIST += rdata/generic/dname_39.h
EXTRA_DIST += rdata/generic/dnskey_48.cc
@@ -107,6 +110,7 @@ libdns___la_SOURCES += character_string.h character_string.cc
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.h
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.cc
libdns___la_SOURCES += rdata/generic/detail/txt_like.h
+libdns___la_SOURCES += rdata/generic/detail/ds_like.h
libdns___la_CPPFLAGS = $(AM_CPPFLAGS)
# Most applications of libdns++ will only implicitly rely on libcryptolink,
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index 6012153..2349401 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -78,7 +78,7 @@ PyObject* Message_makeResponse(s_Message* self);
PyObject* Message_toText(s_Message* self);
PyObject* Message_str(PyObject* self);
PyObject* Message_toWire(s_Message* self, PyObject* args);
-PyObject* Message_fromWire(PyObject* const pyself, PyObject* args);
+PyObject* Message_fromWire(PyObject* pyself, PyObject* args);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -642,8 +642,8 @@ Message_toWire(s_Message* self, PyObject* args) {
}
PyObject*
-Message_fromWire(PyObject* const pyself, PyObject* args) {
- s_Message* self = static_cast<s_Message*>(pyself);
+Message_fromWire(PyObject* pyself, PyObject* args) {
+ s_Message* const self = static_cast<s_Message*>(pyself);
const char* b;
Py_ssize_t len;
unsigned int options = Message::PARSE_DEFAULT;
diff --git a/src/lib/dns/rdata/generic/detail/ds_like.h b/src/lib/dns/rdata/generic/detail/ds_like.h
new file mode 100644
index 0000000..b5a35cd
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/ds_like.h
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DS_LIKE_H
+#define __DS_LIKE_H 1
+
+#include <stdint.h>
+
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+namespace isc {
+namespace dns {
+namespace rdata {
+namespace generic {
+namespace detail {
+
+/// \brief \c rdata::DSLikeImpl class represents the DS-like RDATA for DS
+/// and DLV types.
+///
+/// This class implements the basic interfaces inherited by the DS and DLV
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to DS-like RDATA.
+template <class Type, uint16_t typeCode> class DSLikeImpl {
+ // Common sequence of toWire() operations used for the two versions of
+ // toWire().
+ template <typename Output>
+ void
+ toWireCommon(Output& output) const {
+ output.writeUint16(tag_);
+ output.writeUint8(algorithm_);
+ output.writeUint8(digest_type_);
+ output.writeData(&digest_[0], digest_.size());
+ }
+
+public:
+ /// \brief Constructor from string.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataText is thrown if the method cannot process the
+ /// parameter data for any of a number of reasons.
+ DSLikeImpl(const std::string& ds_str) {
+ std::istringstream iss(ds_str);
+ // peekc should be of iss's char_type for isspace to work
+ std::istringstream::char_type peekc;
+ std::stringbuf digestbuf;
+ uint32_t tag, algorithm, digest_type;
+
+ iss >> tag >> algorithm >> digest_type;
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText,
+ "Invalid " << RRType(typeCode) << " text");
+ }
+ if (tag > 0xffff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " tag out of range");
+ }
+ if (algorithm > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " algorithm out of range");
+ }
+ if (digest_type > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " digest type out of range");
+ }
+
+ iss.read(&peekc, 1);
+ if (!iss.good() || !isspace(peekc, iss.getloc())) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " presentation format error");
+ }
+
+ iss >> &digestbuf;
+
+ tag_ = tag;
+ algorithm_ = algorithm;
+ digest_type_ = digest_type;
+ decodeHex(digestbuf.str(), digest_);
+ }
+
+ /// \brief Constructor from wire-format data.
+ ///
+ /// \param buffer A buffer storing the wire format data.
+ /// \param rdata_len The length of the RDATA in bytes, normally expected
+ /// to be the value of the RDLENGTH field of the corresponding RR.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataLength is thrown if the input data is too short for the
+ /// type.
+ DSLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 4) {
+ isc_throw(InvalidRdataLength, RRType(typeCode) << " too short");
+ }
+
+ tag_ = buffer.readUint16();
+ algorithm_ = buffer.readUint8();
+ digest_type_ = buffer.readUint8();
+
+ rdata_len -= 4;
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+ }
+
+ /// \brief The copy constructor.
+ ///
+ /// Trivial for now; we could have used the default one.
+ DSLikeImpl(const DSLikeImpl& source) {
+ digest_ = source.digest_;
+ tag_ = source.tag_;
+ algorithm_ = source.algorithm_;
+ digest_type_ = source.digest_type_;
+ }
+
+ /// \brief Convert the DS-like data to a string.
+ ///
+ /// \return A \c string object that represents the DS-like data.
+ std::string
+ toText() const {
+ using namespace boost;
+ return (lexical_cast<string>(static_cast<int>(tag_)) +
+ " " + lexical_cast<string>(static_cast<int>(algorithm_)) +
+ " " + lexical_cast<string>(static_cast<int>(digest_type_)) +
+ " " + encodeHex(digest_));
+ }
+
+ /// \brief Render the DS-like data in the wire format to an OutputBuffer
+ /// object.
+ ///
+ /// \param buffer An output buffer to store the wire data.
+ void
+ toWire(OutputBuffer& buffer) const {
+ toWireCommon(buffer);
+ }
+
+ /// \brief Render the DS-like data in the wire format to an
+ /// AbstractMessageRenderer object.
+ ///
+ /// \param renderer A renderer object to send the wire data to.
+ void
+ toWire(AbstractMessageRenderer& renderer) const {
+ toWireCommon(renderer);
+ }
+
+ /// \brief Compare two instances of DS-like RDATA.
+ ///
+ /// It is up to the caller to make sure that \c other is an object of the
+ /// same \c DSLikeImpl class.
+ ///
+ /// \param other the right-hand operand to compare against.
+ /// \return < 0 if \c this would be sorted before \c other.
+ /// \return 0 if \c this is identical to \c other in terms of sorting
+ /// order.
+ /// \return > 0 if \c this would be sorted after \c other.
+ int
+ compare(const DSLikeImpl& other_ds) const {
+ if (tag_ != other_ds.tag_) {
+ return (tag_ < other_ds.tag_ ? -1 : 1);
+ }
+ if (algorithm_ != other_ds.algorithm_) {
+ return (algorithm_ < other_ds.algorithm_ ? -1 : 1);
+ }
+ if (digest_type_ != other_ds.digest_type_) {
+ return (digest_type_ < other_ds.digest_type_ ? -1 : 1);
+ }
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_ds.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_ds.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len)
+ ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+ }
+
+ /// \brief Accessors
+ uint16_t
+ getTag() const {
+ return (tag_);
+ }
+
+private:
+ // straightforward representation of DS RDATA fields
+ uint16_t tag_;
+ uint8_t algorithm_;
+ uint8_t digest_type_;
+ std::vector<uint8_t> digest_;
+};
+
+}
+}
+}
+}
+}
+#endif // __DS_LIKE_H
+
+// Local Variables:
+// mode: c++
+// End:
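For illustration (not part of the patch): DS and DLV both instantiate DSLikeImpl, so they accept the same presentation format and expose the same accessors. A small sketch using the RDATA string from the new unit test further below:

    #include <iostream>

    #include <dns/rdataclass.h>

    void ds_like_sketch() {
        using namespace isc::dns::rdata;

        const generic::DS ds("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03"
                             "C773DD952D5F0EB5C777586DE18DA6B5");
        // DLV shares the implementation, so the same text is accepted.
        const generic::DLV dlv(ds.toText());

        std::cout << ds.getTag() << std::endl;                    // 12892
        std::cout << (ds.toText() == dlv.toText()) << std::endl;  // 1
    }
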
diff --git a/src/lib/dns/rdata/generic/detail/txt_like.h b/src/lib/dns/rdata/generic/detail/txt_like.h
index 392a8ce..a0ab7ac 100644
--- a/src/lib/dns/rdata/generic/detail/txt_like.h
+++ b/src/lib/dns/rdata/generic/detail/txt_like.h
@@ -23,8 +23,24 @@
using namespace std;
using namespace isc::util;
+/// \brief \c rdata::TXTLikeImpl class represents the TXT-like RDATA for TXT
+/// and SPF types.
+///
+/// This class implements the basic interfaces inherited by the TXT and SPF
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to TXT-like RDATA.
template<class Type, uint16_t typeCode>class TXTLikeImpl {
public:
+ /// \brief Constructor from wire-format data.
+ ///
+ /// \param buffer A buffer storing the wire format data.
+ /// \param rdata_len The length of the RDATA in bytes, normally expected
+ /// to be the value of the RDLENGTH field of the corresponding RR.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataLength is thrown if rdata_len exceeds the maximum.
+ /// \c DNSMessageFORMERR is thrown if the RR is malformed.
TXTLikeImpl(InputBuffer& buffer, size_t rdata_len) {
if (rdata_len > MAX_RDLENGTH) {
isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
@@ -52,6 +68,14 @@ public:
} while (rdata_len > 0);
}
+ /// \brief Constructor from string.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c CharStringTooLong is thrown if the parameter string length exceeds
+ /// the maximum.
+ /// \c InvalidRdataText is thrown if the method cannot process the
+ /// parameter data.
explicit TXTLikeImpl(const std::string& txtstr) {
// TBD: this is a simple, incomplete implementation that only supports
// a single character-string.
@@ -86,10 +110,17 @@ public:
string_list_.push_back(data);
}
+ /// \brief The copy constructor.
+ ///
+ /// Trivial for now; we could have used the default one.
TXTLikeImpl(const TXTLikeImpl& other) :
string_list_(other.string_list_)
{}
+ /// \brief Render the TXT-like data in the wire format to an OutputBuffer
+ /// object.
+ ///
+ /// \param buffer An output buffer to store the wire data.
void
toWire(OutputBuffer& buffer) const {
for (vector<vector<uint8_t> >::const_iterator it =
@@ -101,6 +132,11 @@ public:
}
}
+ /// \brief Render the TXT-like data in the wire format to an
+ /// AbstractMessageRenderer object.
+ ///
+ /// \param renderer An AbstractMessageRenderer object to send the wire data
+ /// to.
void
toWire(AbstractMessageRenderer& renderer) const {
for (vector<vector<uint8_t> >::const_iterator it =
@@ -112,6 +148,9 @@ public:
}
}
+ /// \brief Convert the TXT-like data to a string.
+ ///
+ /// \return A \c string object that represents the TXT-like data.
string
toText() const {
string s;
@@ -134,20 +173,33 @@ public:
return (s);
}
+ /// \brief Compare two instances of TXT-like RDATA.
+ ///
+ /// It is up to the caller to make sure that \c other is an object of the
+ /// same \c TXTLikeImpl class.
+ ///
+ /// \param other the right-hand operand to compare against.
+ /// \return < 0 if \c this would be sorted before \c other.
+ /// \return 0 if \c this is identical to \c other in terms of sorting
+ /// order.
+ /// \return > 0 if \c this would be sorted after \c other.
int
compare(const TXTLikeImpl& other) const {
// This implementation is not efficient. Revisit this (TBD).
OutputBuffer this_buffer(0);
toWire(this_buffer);
+ uint8_t const* const this_data = (uint8_t const*)this_buffer.getData();
size_t this_len = this_buffer.getLength();
OutputBuffer other_buffer(0);
other.toWire(other_buffer);
+ uint8_t const* const other_data
+ = (uint8_t const*)other_buffer.getData();
const size_t other_len = other_buffer.getLength();
const size_t cmplen = min(this_len, other_len);
- const int cmp = memcmp(this_buffer.getData(), other_buffer.getData(),
- cmplen);
+ const int cmp = memcmp(this_data, other_data, cmplen);
+
if (cmp != 0) {
return (cmp);
} else {
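For illustration (not part of the patch): TXT and SPF both instantiate TXTLikeImpl, so the constructors and conversions documented above behave identically for the two types. A minimal sketch; the record contents are made up, and only a single character-string is supported, per the TBD note above.

    #include <iostream>

    #include <dns/rdataclass.h>
    #include <util/buffer.h>

    void txt_like_sketch() {
        using namespace isc::dns::rdata;

        const generic::TXT txt("Test-String");
        const generic::SPF spf("v=spf1 -all");

        isc::util::OutputBuffer buffer(0);
        spf.toWire(buffer);                 // length octet followed by data

        std::cout << txt.toText() << std::endl;
        std::cout << (spf.compare(generic::SPF("v=spf1 -all")) == 0)
                  << std::endl;             // 1
    }
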
diff --git a/src/lib/dns/rdata/generic/dlv_32769.cc b/src/lib/dns/rdata/generic/dlv_32769.cc
new file mode 100644
index 0000000..9887aa8
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <dns/rdata/generic/detail/ds_like.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// An implementation object is allocated and constructed.
+DLV::DLV(const string& ds_str) :
+ impl_(new DLVImpl(ds_str))
+{}
+
+/// \brief Constructor from wire-format data.
+///
+/// An implementation object is allocated and constructed.
+DLV::DLV(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DLVImpl(buffer, rdata_len))
+{}
+
+/// \brief Copy constructor
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const DLV& source) :
+ Rdata(), impl_(new DLVImpl(*source.impl_))
+{}
+
+/// \brief Assignment operator
+///
+/// PIMPL-induced logic
+DLV&
+DLV::operator=(const DLV& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ DLVImpl* newimpl = new DLVImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+/// \brief Destructor
+///
+/// Deallocates an internal resource.
+DLV::~DLV() {
+ delete impl_;
+}
+
+/// \brief Convert the \c DLV to a string.
+///
+/// A pass-thru to the corresponding implementation method.
+string
+DLV::toText() const {
+ return (impl_->toText());
+}
+
+/// \brief Render the \c DLV in the wire format to an OutputBuffer object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(OutputBuffer& buffer) const {
+ impl_->toWire(buffer);
+}
+
+/// \brief Render the \c DLV in the wire format to an AbstractMessageRenderer
+/// object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(AbstractMessageRenderer& renderer) const {
+ impl_->toWire(renderer);
+}
+
+/// \brief Compare two instances of \c DLV RDATA.
+///
+/// The type check is performed here. Otherwise, a pass-thru to the
+/// corresponding implementation method.
+int
+DLV::compare(const Rdata& other) const {
+ const DLV& other_ds = dynamic_cast<const DLV&>(other);
+
+ return (impl_->compare(*other_ds.impl_));
+}
+
+/// \brief Tag accessor
+uint16_t
+DLV::getTag() const {
+ return (impl_->getTag());
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/dlv_32769.h b/src/lib/dns/rdata/generic/dlv_32769.h
new file mode 100644
index 0000000..86cd98c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+
+/// \brief \c rdata::generic::DLV class represents the DLV RDATA as defined in
+/// RFC4431.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DLV RDATA.
+class DLV : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ DLV& operator=(const DLV& source);
+
+ /// \brief The destructor.
+ ~DLV();
+
+ /// \brief Return the value of the Tag field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getTag() const;
+private:
+ typedef detail::DSLikeImpl<DLV, 32769> DLVImpl;
+ DLVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/ds_43.cc b/src/lib/dns/rdata/generic/ds_43.cc
index 000f48f..20b62dc 100644
--- a/src/lib/dns/rdata/generic/ds_43.cc
+++ b/src/lib/dns/rdata/generic/ds_43.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,90 +12,32 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <iostream>
#include <string>
-#include <sstream>
-#include <vector>
-
-#include <boost/lexical_cast.hpp>
#include <util/buffer.h>
#include <util/encode/hex.h>
#include <dns/messagerenderer.h>
-#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
-#include <stdio.h>
-#include <time.h>
+#include <dns/rdata/generic/detail/ds_like.h>
using namespace std;
using namespace isc::util;
using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-struct DSImpl {
- // straightforward representation of DS RDATA fields
- DSImpl(uint16_t tag, uint8_t algorithm, uint8_t digest_type,
- const vector<uint8_t>& digest) :
- tag_(tag), algorithm_(algorithm), digest_type_(digest_type),
- digest_(digest)
- {}
-
- uint16_t tag_;
- uint8_t algorithm_;
- uint8_t digest_type_;
- const vector<uint8_t> digest_;
-private:
- // silence MSVC warning C4512: assignment operator could not be generated
- DSImpl& operator=(DSImpl const&);
-};
-
DS::DS(const string& ds_str) :
- impl_(NULL)
-{
- istringstream iss(ds_str);
- unsigned int tag, algorithm, digest_type;
- stringbuf digestbuf;
-
- iss >> tag >> algorithm >> digest_type >> &digestbuf;
- if (iss.bad() || iss.fail()) {
- isc_throw(InvalidRdataText, "Invalid DS text");
- }
- if (tag > 0xffff) {
- isc_throw(InvalidRdataText, "DS tag out of range");
- }
- if (algorithm > 0xff) {
- isc_throw(InvalidRdataText, "DS algorithm out of range");
- }
- if (digest_type > 0xff) {
- isc_throw(InvalidRdataText, "DS digest type out of range");
- }
-
- vector<uint8_t> digest;
- decodeHex(digestbuf.str(), digest);
-
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
-
-DS::DS(InputBuffer& buffer, size_t rdata_len) {
- if (rdata_len < 4) {
- isc_throw(InvalidRdataLength, "DS too short");
- }
-
- uint16_t tag = buffer.readUint16();
- uint16_t algorithm = buffer.readUint8();
- uint16_t digest_type = buffer.readUint8();
-
- rdata_len -= 4;
- vector<uint8_t> digest(rdata_len);
- buffer.readData(&digest[0], rdata_len);
+ impl_(new DSImpl(ds_str))
+{}
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
+DS::DS(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DSImpl(buffer, rdata_len))
+{}
DS::DS(const DS& source) :
Rdata(), impl_(new DSImpl(*source.impl_))
@@ -120,57 +62,29 @@ DS::~DS() {
string
DS::toText() const {
- using namespace boost;
- return (lexical_cast<string>(static_cast<int>(impl_->tag_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->algorithm_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->digest_type_)) +
- " " + encodeHex(impl_->digest_));
+ return (impl_->toText());
}
void
DS::toWire(OutputBuffer& buffer) const {
- buffer.writeUint16(impl_->tag_);
- buffer.writeUint8(impl_->algorithm_);
- buffer.writeUint8(impl_->digest_type_);
- buffer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(buffer);
}
void
DS::toWire(AbstractMessageRenderer& renderer) const {
- renderer.writeUint16(impl_->tag_);
- renderer.writeUint8(impl_->algorithm_);
- renderer.writeUint8(impl_->digest_type_);
- renderer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(renderer);
}
int
DS::compare(const Rdata& other) const {
const DS& other_ds = dynamic_cast<const DS&>(other);
- if (impl_->tag_ != other_ds.impl_->tag_) {
- return (impl_->tag_ < other_ds.impl_->tag_ ? -1 : 1);
- }
- if (impl_->algorithm_ != other_ds.impl_->algorithm_) {
- return (impl_->algorithm_ < other_ds.impl_->algorithm_ ? -1 : 1);
- }
- if (impl_->digest_type_ != other_ds.impl_->digest_type_) {
- return (impl_->digest_type_ < other_ds.impl_->digest_type_ ? -1 : 1);
- }
-
- size_t this_len = impl_->digest_.size();
- size_t other_len = other_ds.impl_->digest_.size();
- size_t cmplen = min(this_len, other_len);
- int cmp = memcmp(&impl_->digest_[0], &other_ds.impl_->digest_[0], cmplen);
- if (cmp != 0) {
- return (cmp);
- } else {
- return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
- }
+ return (impl_->compare(*other_ds.impl_));
}
uint16_t
DS::getTag() const {
- return (impl_->tag_);
+ return (impl_->getTag());
}
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/ds_43.h b/src/lib/dns/rdata/generic/ds_43.h
index 03b19a0..2697f51 100644
--- a/src/lib/dns/rdata/generic/ds_43.h
+++ b/src/lib/dns/rdata/generic/ds_43.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+// BEGIN_HEADER_GUARD
+
#include <stdint.h>
#include <string>
@@ -21,8 +23,6 @@
#include <dns/rrttl.h>
#include <dns/rdata.h>
-// BEGIN_HEADER_GUARD
-
// BEGIN_ISC_NAMESPACE
// BEGIN_COMMON_DECLARATIONS
@@ -30,20 +30,41 @@
// BEGIN_RDATA_NAMESPACE
-struct DSImpl;
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+/// \brief \c rdata::generic::DS class represents the DS RDATA as defined in
+/// RFC3658.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DS RDATA.
class DS : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
DS& operator=(const DS& source);
+
+ /// \brief The destructor.
~DS();
+ /// \brief Return the value of the Tag field.
///
- /// Specialized methods
- ///
+ /// This method never throws an exception.
uint16_t getTag() const;
private:
+ typedef detail::DSLikeImpl<DS, 43> DSImpl;
DSImpl* impl_;
};
diff --git a/src/lib/dns/rdata/generic/spf_99.cc b/src/lib/dns/rdata/generic/spf_99.cc
index 492de98..aa3e4a1 100644
--- a/src/lib/dns/rdata/generic/spf_99.cc
+++ b/src/lib/dns/rdata/generic/spf_99.cc
@@ -30,8 +30,17 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class. The semantics of the class are provided by
+/// an instantiation of the TXTLikeImpl class common to both TXT and SPF.
+
#include <dns/rdata/generic/detail/txt_like.h>
+/// \brief The assignment operator
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
SPF&
SPF::operator=(const SPF& source) {
if (impl_ == source.impl_) {
@@ -45,37 +54,72 @@ SPF::operator=(const SPF& source) {
return (*this);
}
+/// \brief The destructor
SPF::~SPF() {
delete impl_;
}
+/// \brief Constructor from wire-format data.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
SPF::SPF(InputBuffer& buffer, size_t rdata_len) :
impl_(new SPFImpl(buffer, rdata_len))
{}
+/// \brief Constructor from string.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
SPF::SPF(const std::string& txtstr) :
impl_(new SPFImpl(txtstr))
{}
+/// \brief Copy constructor
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
SPF::SPF(const SPF& other) :
Rdata(), impl_(new SPFImpl(*other.impl_))
{}
+/// \brief Render the \c SPF in the wire format to an OutputBuffer object
+///
+/// \return The return value of the corresponding implementation method.
void
SPF::toWire(OutputBuffer& buffer) const {
impl_->toWire(buffer);
}
+/// \brief Render the \c SPF in the wire format to an AbstractMessageRenderer
+/// object
+///
+/// \return The return value of the corresponding implementation method.
void
SPF::toWire(AbstractMessageRenderer& renderer) const {
impl_->toWire(renderer);
}
+/// \brief Convert the \c SPF to a string.
+///
+/// \return The return value of the corresponding implementation method.
string
SPF::toText() const {
return (impl_->toText());
}
+/// \brief Compare two instances of \c SPF RDATA.
+///
+/// This method compares \c this and the \c other \c SPF objects.
+///
+/// This method is expected to be used in a polymorphic way, and the
+/// parameter to compare against is therefore of the abstract \c Rdata class.
+/// However, comparing two \c Rdata objects of different RR types
+/// is meaningless, and \c other must point to a \c SPF object;
+/// otherwise, the standard \c bad_cast exception will be thrown.
+///
+/// \param other the right-hand operand to compare against.
+/// \return The return value of the corresponding implementation method.
int
SPF::compare(const Rdata& other) const {
const SPF& other_txt = dynamic_cast<const SPF&>(other);
diff --git a/src/lib/dns/rdata/generic/spf_99.h b/src/lib/dns/rdata/generic/spf_99.h
index 956adb9..04ac99b 100644
--- a/src/lib/dns/rdata/generic/spf_99.h
+++ b/src/lib/dns/rdata/generic/spf_99.h
@@ -30,14 +30,40 @@
template<class Type, uint16_t typeCode> class TXTLikeImpl;
+/// \brief \c rdata::SPF class represents the SPF RDATA as defined %in
+/// RFC4408.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class. The semantics of the class are provided by
+/// an instantiation of the TXTLikeImpl class common to both TXT and SPF.
class SPF : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
SPF& operator=(const SPF& source);
+
+ /// \brief The destructor.
~SPF();
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return a reference to the data strings
+ ///
+ /// This method never throws an exception.
+ const std::vector<std::vector<uint8_t> >& getString() const;
+
private:
typedef TXTLikeImpl<SPF, 99> SPFImpl;
SPFImpl* impl_;
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
index 0a9a23c..f0c4aca 100644
--- a/src/lib/dns/rdata/in_1/dhcid_49.cc
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
#include <exceptions/exceptions.h>
#include <util/buffer.h>
-#include <util/encode/hex.h>
+#include <util/encode/base64.h>
#include <dns/exceptions.h>
#include <dns/messagerenderer.h>
#include <dns/rdata.h>
@@ -52,7 +52,7 @@ DHCID::DHCID(const string& dhcid_str) {
stringbuf digestbuf;
iss >> &digestbuf;
- isc::util::encode::decodeHex(digestbuf.str(), digest_);
+ isc::util::encode::decodeBase64(digestbuf.str(), digest_);
// RFC4701 states DNS software should consider the RDATA section to
// be opaque, but there must be at least three bytes in the data:
@@ -112,7 +112,7 @@ DHCID::toWire(AbstractMessageRenderer& renderer) const {
/// \return A string representation of \c DHCID.
string
DHCID::toText() const {
- return (isc::util::encode::encodeHex(digest_));
+ return (isc::util::encode::encodeBase64(digest_));
}
/// \brief Compare two instances of \c DHCID RDATA.
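For illustration (not part of the patch): with this change the DHCID presentation format is base64, matching RFC 4701, so the text form round-trips unchanged. The encoded string is the one used by the new unit test below.

    #include <iostream>

    #include <dns/rdataclass.h>

    void dhcid_sketch() {
        using namespace isc::dns::rdata;

        const in::DHCID dhcid(
            "0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=");
        // toText() now emits base64, not hex.
        std::cout << dhcid.toText() << std::endl;
    }
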
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.h b/src/lib/dns/rdata/in_1/dhcid_49.h
index 919395f..90f5fab 100644
--- a/src/lib/dns/rdata/in_1/dhcid_49.h
+++ b/src/lib/dns/rdata/in_1/dhcid_49.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index 5f90cea..ceeb3b8 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -29,13 +29,15 @@ run_unittests_SOURCES += rdata_unittest.h rdata_unittest.cc
run_unittests_SOURCES += rdatafields_unittest.cc
run_unittests_SOURCES += rdata_in_a_unittest.cc rdata_in_aaaa_unittest.cc
run_unittests_SOURCES += rdata_ns_unittest.cc rdata_soa_unittest.cc
-run_unittests_SOURCES += rdata_txt_unittest.cc rdata_mx_unittest.cc
+run_unittests_SOURCES += rdata_txt_like_unittest.cc
+run_unittests_SOURCES += rdata_mx_unittest.cc
run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
run_unittests_SOURCES += rdata_dname_unittest.cc
run_unittests_SOURCES += rdata_afsdb_unittest.cc
run_unittests_SOURCES += rdata_opt_unittest.cc
+run_unittests_SOURCES += rdata_dhcid_unittest.cc
run_unittests_SOURCES += rdata_dnskey_unittest.cc
-run_unittests_SOURCES += rdata_ds_unittest.cc
+run_unittests_SOURCES += rdata_ds_like_unittest.cc
run_unittests_SOURCES += rdata_nsec_unittest.cc
run_unittests_SOURCES += rdata_nsec3_unittest.cc
run_unittests_SOURCES += rdata_nsecbitmap_unittest.cc
diff --git a/src/lib/dns/tests/rdata_dhcid_unittest.cc b/src/lib/dns/tests/rdata_dhcid_unittest.cc
new file mode 100644
index 0000000..9df7043
--- /dev/null
+++ b/src/lib/dns/tests/rdata_dhcid_unittest.cc
@@ -0,0 +1,111 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/rdataclass.h>
+#include <util/encode/base64.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata;
+
+namespace {
+
+const string string_dhcid(
+ "0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=");
+
+const in::DHCID rdata_dhcid(string_dhcid);
+
+class Rdata_DHCID_Test : public RdataTest {
+};
+
+TEST_F(Rdata_DHCID_Test, createFromString) {
+ const in::DHCID rdata_dhcid2(string_dhcid);
+ EXPECT_EQ(0, rdata_dhcid2.compare(rdata_dhcid));
+}
+
+TEST_F(Rdata_DHCID_Test, badBase64) {
+ EXPECT_THROW(const in::DHCID rdata_dhcid_bad("00"), isc::BadValue);
+}
+
+TEST_F(Rdata_DHCID_Test, badLength) {
+ EXPECT_THROW(const in::DHCID rdata_dhcid_bad("MDA="), InvalidRdataLength);
+}
+
+TEST_F(Rdata_DHCID_Test, copy) {
+ const in::DHCID rdata_dhcid2(rdata_dhcid);
+ EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid2));
+}
+
+TEST_F(Rdata_DHCID_Test, createFromWire) {
+ EXPECT_EQ(0, rdata_dhcid.compare(
+ *rdataFactoryFromFile(RRType("DHCID"), RRClass("IN"),
+ "rdata_dhcid_fromWire")));
+ // TBD: more tests
+}
+
+TEST_F(Rdata_DHCID_Test, toWireRenderer) {
+ rdata_dhcid.toWire(renderer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_dhcid_toWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, renderer.getData(),
+ renderer.getLength(), &data[0], data.size());
+}
+
+TEST_F(Rdata_DHCID_Test, toWireBuffer) {
+ rdata_dhcid.toWire(obuffer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_dhcid_toWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), &data[0], data.size());
+}
+
+TEST_F(Rdata_DHCID_Test, toText) {
+ EXPECT_EQ(string_dhcid, rdata_dhcid.toText());
+}
+
+TEST_F(Rdata_DHCID_Test, getDHCIDDigest) {
+ const string string_dhcid1(encodeBase64(rdata_dhcid.getDigest()));
+
+ EXPECT_EQ(string_dhcid, string_dhcid1);
+}
+
+TEST_F(Rdata_DHCID_Test, compare) {
+ // trivial case: self equivalence
+ EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid));
+
+ in::DHCID rdata_dhcid1("0YLQvtC/0L7Qu9GPINC00LLQsCDRgNGD0LHQu9GP");
+ in::DHCID rdata_dhcid2("0YLQvtC/0L7Qu9GPINGC0YDQuCDRgNGD0LHQu9GP");
+ in::DHCID rdata_dhcid3("0YLQvtC/0L7Qu9GPINGH0LXRgtGL0YDQtSDRgNGD0LHQu9GP");
+
+ EXPECT_LT(rdata_dhcid1.compare(rdata_dhcid2), 0);
+ EXPECT_GT(rdata_dhcid2.compare(rdata_dhcid1), 0);
+
+ EXPECT_LT(rdata_dhcid2.compare(rdata_dhcid3), 0);
+ EXPECT_GT(rdata_dhcid3.compare(rdata_dhcid2), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_dhcid.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_ds_like_unittest.cc b/src/lib/dns/tests/rdata_ds_like_unittest.cc
new file mode 100644
index 0000000..9b29446
--- /dev/null
+++ b/src/lib/dns/tests/rdata_ds_like_unittest.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <string>
+
+#include <util/buffer.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+// hacks to make templates work
+template <class T>
+class RRTYPE : public RRType {
+public:
+ RRTYPE();
+};
+
+template<> RRTYPE<generic::DS>::RRTYPE() : RRType(RRType::DS()) {}
+template<> RRTYPE<generic::DLV>::RRTYPE() : RRType(RRType::DLV()) {}
+
+template <class DS_LIKE>
+class Rdata_DS_LIKE_Test : public RdataTest {
+protected:
+ static DS_LIKE const rdata_ds_like;
+};
+
+string ds_like_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+
+template <class DS_LIKE>
+DS_LIKE const Rdata_DS_LIKE_Test<DS_LIKE>::rdata_ds_like(ds_like_txt);
+
+// The list of types we want to test.
+typedef testing::Types<generic::DS, generic::DLV> Implementations;
+
+TYPED_TEST_CASE(Rdata_DS_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toText_DS_LIKE) {
+ EXPECT_EQ(ds_like_txt, this->rdata_ds_like.toText());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, badText_DS_LIKE) {
+ EXPECT_THROW(const TypeParam ds_like2("99999 5 2 BEEF"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 555 2 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 22222 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 2"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("GARBAGE IN"), InvalidRdataText);
+ // no space between the digest type and the digest.
+ EXPECT_THROW(const TypeParam ds_like2(
+ "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, createFromWire_DS_LIKE) {
+ EXPECT_EQ(0, this->rdata_ds_like.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass::IN(),
+ "rdata_ds_fromWire")));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, assignment_DS_LIKE) {
+ TypeParam copy((string(ds_like_txt)));
+ copy = this->rdata_ds_like;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+
+ // Check if the copied data is valid even after the original is deleted
+ TypeParam* copy2 = new TypeParam(this->rdata_ds_like);
+ TypeParam copy3((string(ds_like_txt)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(this->rdata_ds_like));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, getTag_DS_LIKE) {
+ EXPECT_EQ(12892, this->rdata_ds_like.getTag());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireRenderer) {
+ Rdata_DS_LIKE_Test<TypeParam>::renderer.skip(2);
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->renderer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_ds_fromWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t*>
+ (this->obuffer.getData()) + 2,
+ this->obuffer.getLength() - 2,
+ &data[2], data.size() - 2);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireBuffer) {
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->obuffer);
+}
+
+string ds_like_txt1("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different tag
+string ds_like_txt2("12893 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different algorithm
+string ds_like_txt3("12892 6 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest type
+string ds_like_txt4("12892 5 3 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest
+string ds_like_txt5("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest length
+string ds_like_txt6("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B555");
+
+TYPED_TEST(Rdata_DS_LIKE_Test, compare) {
+ // trivial case: self equivalence
+ EXPECT_EQ(0, TypeParam(ds_like_txt).compare(TypeParam(ds_like_txt)));
+
+ // non-equivalence tests
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt2)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt2).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt3)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt3).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt4)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt4).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt5)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt5).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt6)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt6).compare(TypeParam(ds_like_txt1)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(this->rdata_ds_like.compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_ds_unittest.cc b/src/lib/dns/tests/rdata_ds_unittest.cc
deleted file mode 100644
index 5988620..0000000
--- a/src/lib/dns/tests/rdata_ds_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-
-#include <util/buffer.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-class Rdata_DS_Test : public RdataTest {
- // there's nothing to specialize
-};
-
-string ds_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5");
-const generic::DS rdata_ds(ds_txt);
-
-TEST_F(Rdata_DS_Test, toText_DS) {
- EXPECT_EQ(ds_txt, rdata_ds.toText());
-}
-
-TEST_F(Rdata_DS_Test, badText_DS) {
- EXPECT_THROW(const generic::DS ds2("99999 5 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 555 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 22222 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 2"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("GARBAGE IN"), InvalidRdataText);
-}
-
-// this test currently fails; we must fix it, and then migrate the test to
-// badText_DS
-TEST_F(Rdata_DS_Test, DISABLED_badText_DS) {
- // no space between the digest type and the digest.
- EXPECT_THROW(const generic::DS ds2(
- "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
-}
-
-TEST_F(Rdata_DS_Test, createFromWire_DS) {
- EXPECT_EQ(0, rdata_ds.compare(
- *rdataFactoryFromFile(RRType::DS(), RRClass::IN(),
- "rdata_ds_fromWire")));
-}
-
-TEST_F(Rdata_DS_Test, getTag_DS) {
- EXPECT_EQ(12892, rdata_ds.getTag());
-}
-
-TEST_F(Rdata_DS_Test, toWireRenderer) {
- renderer.skip(2);
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(renderer);
-
- vector<unsigned char> data;
- UnitTestUtil::readWireData("rdata_ds_fromWire", data);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- static_cast<const uint8_t *>(obuffer.getData()) + 2,
- obuffer.getLength() - 2, &data[2], data.size() - 2);
-}
-
-TEST_F(Rdata_DS_Test, toWireBuffer) {
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(obuffer);
-}
-
-TEST_F(Rdata_DS_Test, compare) {
- // trivial case: self equivalence
- EXPECT_EQ(0, generic::DS(ds_txt).compare(generic::DS(ds_txt)));
-
- // TODO: need more tests
-}
-
-}
diff --git a/src/lib/dns/tests/rdata_txt_like_unittest.cc b/src/lib/dns/tests/rdata_txt_like_unittest.cc
new file mode 100644
index 0000000..981265e
--- /dev/null
+++ b/src/lib/dns/tests/rdata_txt_like_unittest.cc
@@ -0,0 +1,261 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is the common code for TXT and SPF tests.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/rdataclass.h>
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+
+template<class T>
+class RRTYPE : public RRType {
+public:
+ RRTYPE();
+};
+
+template<> RRTYPE<generic::TXT>::RRTYPE() : RRType(RRType::TXT()) {}
+template<> RRTYPE<generic::SPF>::RRTYPE() : RRType(RRType::SPF()) {}
+
+namespace {
+const uint8_t wiredata_txt_like[] = {
+ sizeof("Test String") - 1,
+ 'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g'
+};
+
+const uint8_t wiredata_nulltxt[] = { 0 };
+vector<uint8_t> wiredata_longesttxt(256, 'a');
+
+template<class TXT_LIKE>
+class Rdata_TXT_LIKE_Test : public RdataTest {
+protected:
+ Rdata_TXT_LIKE_Test() {
+ wiredata_longesttxt[0] = 255; // adjust length
+ }
+
+ static const TXT_LIKE rdata_txt_like;
+ static const TXT_LIKE rdata_txt_like_empty;
+ static const TXT_LIKE rdata_txt_like_quoted;
+};
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like("Test String");
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like_empty("");
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like_quoted
+ ("\"Test String\"");
+
+// The list of types we want to test.
+typedef testing::Types<generic::TXT, generic::SPF> Implementations;
+
+TYPED_TEST_CASE(Rdata_TXT_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, createFromText) {
+ // normal case is covered in toWireBuffer.
+
+ // surrounding double-quotes shouldn't change the result.
+ EXPECT_EQ(0, this->rdata_txt_like.compare(this->rdata_txt_like_quoted));
+
+ // Null character-string.
+ this->obuffer.clear();
+ TypeParam(string("")).toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ wiredata_nulltxt, sizeof(wiredata_nulltxt));
+
+ // Longest possible character-string.
+ this->obuffer.clear();
+ TypeParam(string(255, 'a')).toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ &wiredata_longesttxt[0], wiredata_longesttxt.size());
+
+ // Too long text for a valid character-string.
+ EXPECT_THROW(TypeParam(string(256, 'a')), CharStringTooLong);
+
+ // The escape character makes the double quote a part of character-string,
+ // so this is invalid input and should be rejected.
+ EXPECT_THROW(TypeParam("\"Test String\\\""), InvalidRdataText);
+
+ // Terminating double-quote is provided, so this is valid, but in this
+ // version of the implementation we reject escaped characters.
+ EXPECT_THROW(TypeParam("\"Test String\\\"\""), InvalidRdataText);
+}
+
+void
+makeLargest(vector<uint8_t>& data) {
+ uint8_t ch = 0;
+
+ // create 255 sets of character-strings, each of which has the longest
+ // length (255-byte string + 1-byte length field)
+ for (int i = 0; i < 255; ++i, ++ch) {
+ data.push_back(255);
+ data.insert(data.end(), 255, ch);
+ }
+ // the last character-string should be 255 bytes (including the one-byte
+ // length field) so that the total length fits in the range of a
+ // 16-bit integer.
+ data.push_back(254);
+ data.insert(data.end(), 254, ch);
+
+ assert(data.size() == 65535);
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, createFromWire) {
+ EXPECT_EQ(0, this->rdata_txt_like.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire1")));
+
+ // Empty character string
+ EXPECT_EQ(0, this->rdata_txt_like_empty.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire2.wire")));
+
+ // Multiple character strings
+ this->obuffer.clear();
+ this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire3.wire")->toWire(this->obuffer);
+ // the result should be 'wiredata_txt' repeated twice
+ vector<uint8_t> expected_data(wiredata_txt_like, wiredata_txt_like +
+ sizeof(wiredata_txt_like));
+ expected_data.insert(expected_data.end(), wiredata_txt_like,
+ wiredata_txt_like + sizeof(wiredata_txt_like));
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ &expected_data[0], expected_data.size());
+
+ // Largest length of data. There's nothing special, but should be
+ // constructed safely, and the content should be identical to the original
+ // data.
+ vector<uint8_t> largest_txt_like_data;
+ makeLargest(largest_txt_like_data);
+ InputBuffer ibuffer(&largest_txt_like_data[0],
+ largest_txt_like_data.size());
+ TypeParam largest_txt_like(ibuffer, largest_txt_like_data.size());
+ this->obuffer.clear();
+ largest_txt_like.toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ &largest_txt_like_data[0],
+ largest_txt_like_data.size());
+
+ // rdlen parameter is out of range. This is a rare event because we'd
+ // normally call the constructor via a polymorphic wrapper, where the
+ // length is validated. But this should be checked explicitly.
+ InputBuffer ibuffer2(&largest_txt_like_data[0],
+ largest_txt_like_data.size());
+ EXPECT_THROW(TypeParam(ibuffer2, 65536), InvalidRdataLength);
+
+ // RDATA is empty, which is invalid for TXT_LIKE.
+ EXPECT_THROW(this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire4.wire"),
+ DNSMessageFORMERR);
+
+ // character-string length is too large, which could cause overrun.
+ EXPECT_THROW(this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toWireBuffer) {
+ this->rdata_txt_like.toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ wiredata_txt_like, sizeof(wiredata_txt_like));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toWireRenderer) {
+ this->rdata_txt_like.toWire(this->renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->renderer.getData(),
+ this->renderer.getLength(),
+ wiredata_txt_like, sizeof(wiredata_txt_like));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toText) {
+ EXPECT_EQ("\"Test String\"", this->rdata_txt_like.toText());
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, assignment) {
+ TypeParam rdata1("assignment1");
+ TypeParam rdata2("assignment2");
+ rdata1 = rdata2;
+ EXPECT_EQ(0, rdata2.compare(rdata1));
+
+ // Check if the copied data is valid even after the original is deleted
+ TypeParam* rdata3 = new TypeParam(rdata1);
+ TypeParam rdata4("assignment3");
+ rdata4 = *rdata3;
+ delete rdata3;
+ EXPECT_EQ(0, rdata4.compare(rdata1));
+
+ // Self assignment
+ rdata2 = rdata2;
+ EXPECT_EQ(0, rdata2.compare(rdata1));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, compare) {
+ string const txt1("aaaaaaaa");
+ string const txt2("aaaaaaaaaa");
+ string const txt3("bbbbbbbb");
+ string const txt4(129, 'a');
+ string const txt5(128, 'b');
+
+ EXPECT_EQ(TypeParam(txt1).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam("").compare(TypeParam(txt1)), 0);
+ EXPECT_GT(TypeParam(txt1).compare(TypeParam("")), 0);
+
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt2)), 0);
+ EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt3)), 0);
+ EXPECT_GT(TypeParam(txt3).compare(TypeParam(txt1)), 0);
+
+ // we're comparing the data raw, starting at the length octet, so a shorter
+ // string sorts before a longer one no matter the lexicographical order
+ EXPECT_LT(TypeParam(txt3).compare(TypeParam(txt2)), 0);
+ EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt3)), 0);
+
+ // to make sure the length octet compares unsigned
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt4)), 0);
+ EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam(txt5).compare(TypeParam(txt4)), 0);
+ EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt5)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(TypeParam(txt1).compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_txt_unittest.cc b/src/lib/dns/tests/rdata_txt_unittest.cc
deleted file mode 100644
index e5f8ac9..0000000
--- a/src/lib/dns/tests/rdata_txt_unittest.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <util/buffer.h>
-#include <dns/exceptions.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-const generic::TXT rdata_txt("Test String");
-const generic::TXT rdata_txt_empty("");
-const generic::TXT rdata_txt_quoated("\"Test String\"");
-const uint8_t wiredata_txt[] = {
- sizeof("Test String") - 1,
- 'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g'
-};
-const uint8_t wiredata_nulltxt[] = { 0 };
-vector<uint8_t> wiredata_longesttxt(256, 'a');
-
-class Rdata_TXT_Test : public RdataTest {
-protected:
- Rdata_TXT_Test() {
- wiredata_longesttxt[0] = 255; // adjust length
- }
-};
-
-TEST_F(Rdata_TXT_Test, createFromText) {
- // normal case is covered in toWireBuffer.
-
- // surrounding double-quotes shouldn't change the result.
- EXPECT_EQ(0, rdata_txt.compare(rdata_txt_quoated));
-
- // Null character-string.
- obuffer.clear();
- generic::TXT(string("")).toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- wiredata_nulltxt, sizeof(wiredata_nulltxt));
-
- // Longest possible character-string.
- obuffer.clear();
- generic::TXT(string(255, 'a')).toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- &wiredata_longesttxt[0], wiredata_longesttxt.size());
-
- // Too long text for a valid character-string.
- EXPECT_THROW(generic::TXT(string(256, 'a')), CharStringTooLong);
-
- // The escape character makes the double quote a part of character-string,
- // so this is invalid input and should be rejected.
- EXPECT_THROW(generic::TXT("\"Test String\\\""), InvalidRdataText);
-
- // Terminating double-quote is provided, so this is valid, but in this
- // version of implementation we reject escaped characters.
- EXPECT_THROW(generic::TXT("\"Test String\\\"\""), InvalidRdataText);
-}
-
-void
-makeLargest(vector<uint8_t>& data) {
- uint8_t ch = 0;
-
- // create 255 sets of character-strings, each of which has the longest
- // length (255bytes string + 1-byte length field)
- for (int i = 0; i < 255; ++i, ++ch) {
- data.push_back(255);
- data.insert(data.end(), 255, ch);
- }
- // the last character-string should be 255 bytes (including the one-byte
- // length field) in length so that the total length should be in the range
- // of 16-bit integers.
- data.push_back(254);
- data.insert(data.end(), 254, ch);
-
- assert(data.size() == 65535);
-}
-
-TEST_F(Rdata_TXT_Test, createFromWire) {
- EXPECT_EQ(0, rdata_txt.compare(
- *rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire1")));
-
- // Empty character string
- EXPECT_EQ(0, rdata_txt_empty.compare(
- *rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire2.wire")));
-
- // Multiple character strings
- obuffer.clear();
- rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire3.wire")->toWire(obuffer);
- // the result should be 'wiredata_txt' repeated twice
- vector<uint8_t> expected_data(wiredata_txt, wiredata_txt +
- sizeof(wiredata_txt));
- expected_data.insert(expected_data.end(), wiredata_txt,
- wiredata_txt + sizeof(wiredata_txt));
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- &expected_data[0], expected_data.size());
-
- // Largest length of data. There's nothing special, but should be
- // constructed safely, and the content should be identical to the original
- // data.
- vector<uint8_t> largest_txt_data;
- makeLargest(largest_txt_data);
- InputBuffer ibuffer(&largest_txt_data[0], largest_txt_data.size());
- generic::TXT largest_txt(ibuffer, largest_txt_data.size());
- obuffer.clear();
- largest_txt.toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- &largest_txt_data[0], largest_txt_data.size());
-
- // rdlen parameter is out of range. This is a rare event because we'd
- // normally call the constructor via a polymorphic wrapper, where the
- // length is validated. But this should be checked explicitly.
- InputBuffer ibuffer2(&largest_txt_data[0], largest_txt_data.size());
- EXPECT_THROW(generic::TXT(ibuffer2, 65536), InvalidRdataLength);
-
- // RDATA is empty, which is invalid for TXT.
- EXPECT_THROW(rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire4.wire"),
- DNSMessageFORMERR);
-
- // character-string length is too large, which could cause overrun.
- EXPECT_THROW(rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire5.wire"),
- DNSMessageFORMERR);
-}
-
-TEST_F(Rdata_TXT_Test, toWireBuffer) {
- rdata_txt.toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- wiredata_txt, sizeof(wiredata_txt));
-}
-
-TEST_F(Rdata_TXT_Test, toText) {
- EXPECT_EQ("\"Test String\"", rdata_txt.toText());
-}
-}
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index d8f0d1c..27edf5f 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -90,6 +90,7 @@ EXTRA_DIST += question_fromWire question_toWire1 question_toWire2
EXTRA_DIST += rdatafields1.spec rdatafields2.spec rdatafields3.spec
EXTRA_DIST += rdatafields4.spec rdatafields5.spec rdatafields6.spec
EXTRA_DIST += rdata_cname_fromWire rdata_dname_fromWire rdata_dnskey_fromWire
+EXTRA_DIST += rdata_dhcid_fromWire rdata_dhcid_toWire
EXTRA_DIST += rdata_ds_fromWire rdata_in_a_fromWire rdata_in_aaaa_fromWire
EXTRA_DIST += rdata_mx_fromWire rdata_mx_toWire1 rdata_mx_toWire2
EXTRA_DIST += rdata_ns_fromWire
diff --git a/src/lib/dns/tests/testdata/rdata_dhcid_fromWire b/src/lib/dns/tests/testdata/rdata_dhcid_fromWire
new file mode 100644
index 0000000..0c8d56a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_dhcid_fromWire
@@ -0,0 +1,12 @@
+#
+# DHCID RDATA stored in an input buffer
+#
+# Valid RDATA for 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+#
+# RDLENGTH=41 bytes
+# 0 1
+ 00 29
+# 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+d0 b2 20 d0 bb d0 b5 d1 81 d1 83 20 d1 80 d0 be
+d0 b4 d0 b8 d0 bb d0 b0 d1 81 d1 8c 20 d1 91 d0
+bb d0 be d1 87 d0 ba d0 b0
diff --git a/src/lib/dns/tests/testdata/rdata_dhcid_toWire b/src/lib/dns/tests/testdata/rdata_dhcid_toWire
new file mode 100644
index 0000000..99ec229
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_dhcid_toWire
@@ -0,0 +1,7 @@
+#
+# DHCID RDATA stored in an output buffer
+#
+# 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+d0 b2 20 d0 bb d0 b5 d1 81 d1 83 20 d1 80 d0 be
+d0 b4 d0 b8 d0 bb d0 b0 d1 81 d1 8c 20 d1 91 d0
+bb d0 be d1 87 d0 ba d0 b0
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index 069a7b4..a5f793c 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -45,7 +45,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
-check_PROGRAMS = logger_example
+noinst_PROGRAMS = logger_example
logger_example_SOURCES = logger_example.cc
logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
@@ -53,7 +53,7 @@ logger_example_LDADD = $(top_builddir)/src/lib/log/liblog.la
logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-check_PROGRAMS += init_logger_test
+noinst_PROGRAMS += init_logger_test
init_logger_test_SOURCES = init_logger_test.cc
init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
@@ -61,7 +61,7 @@ init_logger_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-noinst_PROGRAMS = $(TESTS)
+noinst_PROGRAMS += $(TESTS)
# Additional test using the shell. These are principally tests
# where the global logging environment is affected, and where the
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index f90f7b6..a3e74c5 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,5 +1,5 @@
SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
-SUBDIRS += log_messages
+SUBDIRS += xfrin log_messages
python_PYTHON = __init__.py
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index d07df1e..11a13ec 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -510,10 +510,10 @@ class UIModuleCCSession(MultiConfigData):
def _remove_value_from_list(self, identifier, value):
if value is None:
- # we are directly removing an list index
+ # we are directly removing a list index
id, list_indices = isc.cc.data.split_identifier_list_indices(identifier)
if list_indices is None:
- raise DataTypeError("identifier in remove_value() does not contain a list index, and no value to remove")
+ raise isc.cc.data.DataTypeError("identifier in remove_value() does not contain a list index, and no value to remove")
else:
self.set_value(identifier, None)
else:
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index 351c8e6..1c63957 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -747,6 +747,9 @@ class TestUIModuleCCSession(unittest.TestCase):
self.assertEqual({'Spec2': {'item5': []}}, uccs._local_changes)
uccs.add_value("Spec2/item5", None);
self.assertEqual({'Spec2': {'item5': ['']}}, uccs._local_changes)
+ # Intending to empty a list element, but forget specifying the index.
+ self.assertRaises(isc.cc.data.DataTypeError,
+ uccs.remove_value, "Spec2/item5", None)
def test_add_remove_value_named_set(self):
fake_conn = fakeUIConn()
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index ca2d9b4..60282d9 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -24,7 +24,6 @@ datasrc_la_LDFLAGS += -module
datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
datasrc_la_LIBADD += $(PYTHON_LIB)
-#datasrc_la_LIBADD += $(SQLITE_LIBS)
EXTRA_DIST = client_inc.cc
EXTRA_DIST += finder_inc.cc
diff --git a/src/lib/python/isc/datasrc/__init__.py b/src/lib/python/isc/datasrc/__init__.py
index 0b4ed98..ea0c0a7 100644
--- a/src/lib/python/isc/datasrc/__init__.py
+++ b/src/lib/python/isc/datasrc/__init__.py
@@ -1,6 +1,17 @@
import sys
import os
+# The datasource factory loader uses dlopen, as does python
+# for its modules. Some dynamic linkers do not play nice if
+# modules are not loaded with RTLD_GLOBAL, a symptom of which
+# is that exceptions are not recognized by type. So to make
+# sure this doesn't happen, we temporarily set RTLD_GLOBAL
+# during the loading of the datasource wrappers.
+if sys.platform != 'win32':
+ import ctypes
+ flags = sys.getdlopenflags()
+ sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL)
+
# this setup is a temporary workaround to deal with the problem of
# having both 'normal' python modules and a wrapper module
# Once all programs use the new interface, we should remove the
@@ -16,6 +27,11 @@ if intree:
from datasrc import *
else:
from isc.datasrc.datasrc import *
+
+# revert to the default dlopen flags
+if sys.platform != 'win32':
+ sys.setdlopenflags(flags)
+
from isc.datasrc.sqlite3_ds import *
from isc.datasrc.master import *
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index 1eba488..b81f48d 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -7,7 +7,20 @@ This is the python wrapper for the abstract base class that defines\n\
the common interface for various types of data source clients. A data\n\
source client is a top level access point to a data source, allowing \n\
various operations on the data source such as lookups, traversing or \n\
-updates. The client class itself has limited focus and delegates \n\
+updates.\n\
+This class serves as both the factory and the main interface to those \n\
+classes.\n\
+\n\
+The constructor takes two arguments: a type (string), and\n\
+configuration data for a datasource client of that type. The configuration\n\
+data is currently passed as JSON in string form, and its contents depend\n\
+on the type of datasource from the first argument. For instance, a\n\
+datasource of type \"sqlite3\" takes the config \n\
+{ \"database_file\": \"/var/example.org\" }\n\
+We may in the future add support for passing structured configuration\n\
+data directly, but right now we limit it to a JSON-formatted string.\n\
+\n\
+The client class itself has limited focus and delegates \n\
the responsibility for these specific operations to other (c++) classes;\n\
in general methods of this class act as factories of these other classes.\n\
\n\
@@ -110,7 +123,7 @@ Return an updater to make updates to a specific zone.\n\
The RR class of the zone is the one that the client is expected to\n\
handle (see the detailed description of this class).\n\
\n\
-If the specified zone is not found via the client, a NULL pointer will\n\
+If the specified zone is not found via the client, a None object will\n\
be returned; in other words a completely new zone cannot be created\n\
using an updater. It must be created beforehand (even if it's an empty\n\
placeholder) in a way specific to the underlying data source.\n\
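
The docstring above describes the new two-argument constructor (type string
plus JSON configuration) and the None result for a zone that does not exist.
A minimal usage sketch, assuming a hypothetical sqlite3 database file path:

    import isc.datasrc
    import isc.dns

    # the sqlite3 backend takes its configuration as a JSON string; the
    # database path below is only an illustrative placeholder
    dsc = isc.datasrc.DataSourceClient(
        "sqlite3", '{ "database_file": "/tmp/example.org.sqlite3" }')

    # get_updater() returns None when the requested zone does not exist
    updater = dsc.get_updater(isc.dns.Name("notexistent.example"), True)
    assert updater is None
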
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 984eabf..caebd25 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -23,6 +23,7 @@
#include <util/python/pycppwrapper_util.h>
#include <datasrc/client.h>
+#include <datasrc/factory.h>
#include <datasrc/database.h>
#include <datasrc/data_source.h>
#include <datasrc/sqlite3_accessor.h>
@@ -50,13 +51,9 @@ namespace {
class s_DataSourceClient : public PyObject {
public:
s_DataSourceClient() : cppobj(NULL) {};
- DataSourceClient* cppobj;
+ DataSourceClientContainer* cppobj;
};
-// Shortcut type which would be convenient for adding class variables safely.
-typedef CPPPyObjectContainer<s_DataSourceClient, DataSourceClient>
- DataSourceClientContainer;
-
PyObject*
DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
@@ -64,12 +61,12 @@ DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
if (PyArg_ParseTuple(args, "O!", &name_type, &name)) {
try {
DataSourceClient::FindResult find_result(
- self->cppobj->findZone(PyName_ToName(name)));
+ self->cppobj->getInstance().findZone(PyName_ToName(name)));
result::Result r = find_result.code;
ZoneFinderPtr zfp = find_result.zone_finder;
// Use N instead of O so refcount isn't increased twice
- return (Py_BuildValue("IN", r, createZoneFinderObject(zfp)));
+ return (Py_BuildValue("IN", r, createZoneFinderObject(zfp, po_self)));
} catch (const std::exception& exc) {
PyErr_SetString(getDataSourceException("Error"), exc.what());
return (NULL);
@@ -90,7 +87,8 @@ DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
try {
return (createZoneIteratorObject(
- self->cppobj->getIterator(PyName_ToName(name_obj))));
+ self->cppobj->getInstance().getIterator(PyName_ToName(name_obj)),
+ po_self));
} catch (const isc::NotImplemented& ne) {
PyErr_SetString(getDataSourceException("NotImplemented"),
ne.what());
@@ -120,9 +118,13 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
PyBool_Check(replace_obj)) {
bool replace = (replace_obj != Py_False);
try {
- return (createZoneUpdaterObject(
- self->cppobj->getUpdater(PyName_ToName(name_obj),
- replace)));
+ ZoneUpdaterPtr updater =
+ self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
+ replace);
+ if (!updater) {
+ return (Py_None);
+ }
+ return (createZoneUpdaterObject(updater, po_self));
} catch (const isc::NotImplemented& ne) {
PyErr_SetString(getDataSourceException("NotImplemented"),
ne.what());
@@ -162,22 +164,33 @@ PyMethodDef DataSourceClient_methods[] = {
int
DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
- // TODO: we should use the factory function which hasn't been written
- // yet. For now we hardcode the sqlite3 initialization, and pass it one
- // string for the database file. (similar to how the 'old direct'
- // sqlite3_ds code works)
+ char* ds_type_str;
+ char* ds_config_str;
try {
- char* db_file_name;
- if (PyArg_ParseTuple(args, "s", &db_file_name)) {
- boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
- new SQLite3Accessor(db_file_name, isc::dns::RRClass::IN()));
- self->cppobj = new DatabaseClient(isc::dns::RRClass::IN(),
- sqlite3_accessor);
+ // Turn the given argument into config Element; then simply call
+ // factory class to do its magic
+
+ // for now, ds_config must be JSON string
+ if (PyArg_ParseTuple(args, "ss", &ds_type_str, &ds_config_str)) {
+ isc::data::ConstElementPtr ds_config =
+ isc::data::Element::fromJSON(ds_config_str);
+ self->cppobj = new DataSourceClientContainer(ds_type_str,
+ ds_config);
return (0);
} else {
return (-1);
}
-
+ } catch (const isc::data::JSONError& je) {
+ const string ex_what = "JSON parse error in data source configuration "
+ "data for type " +
+ string(ds_type_str) + ":" + je.what();
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (const DataSourceError& dse) {
+ const string ex_what = "Failed to create DataSourceClient of type " +
+ string(ds_type_str) + ":" + dse.what();
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
} catch (const exception& ex) {
const string ex_what = "Failed to construct DataSourceClient object: " +
string(ex.what());
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 4b0324a..7676104 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -77,14 +77,26 @@ initModulePart_DataSourceClient(PyObject* mod) {
}
Py_INCREF(&datasourceclient_type);
- addClassVariable(datasourceclient_type, "SUCCESS",
- Py_BuildValue("I", result::SUCCESS));
- addClassVariable(datasourceclient_type, "EXIST",
- Py_BuildValue("I", result::EXIST));
- addClassVariable(datasourceclient_type, "NOTFOUND",
- Py_BuildValue("I", result::NOTFOUND));
- addClassVariable(datasourceclient_type, "PARTIALMATCH",
- Py_BuildValue("I", result::PARTIALMATCH));
+ try {
+ installClassVariable(datasourceclient_type, "SUCCESS",
+ Py_BuildValue("I", result::SUCCESS));
+ installClassVariable(datasourceclient_type, "EXIST",
+ Py_BuildValue("I", result::EXIST));
+ installClassVariable(datasourceclient_type, "NOTFOUND",
+ Py_BuildValue("I", result::NOTFOUND));
+ installClassVariable(datasourceclient_type, "PARTIALMATCH",
+ Py_BuildValue("I", result::PARTIALMATCH));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in DataSourceClient initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in DataSourceClient initialization");
+ return (false);
+ }
return (true);
}
@@ -103,26 +115,41 @@ initModulePart_ZoneFinder(PyObject* mod) {
}
Py_INCREF(&zonefinder_type);
- addClassVariable(zonefinder_type, "SUCCESS",
- Py_BuildValue("I", ZoneFinder::SUCCESS));
- addClassVariable(zonefinder_type, "DELEGATION",
- Py_BuildValue("I", ZoneFinder::DELEGATION));
- addClassVariable(zonefinder_type, "NXDOMAIN",
- Py_BuildValue("I", ZoneFinder::NXDOMAIN));
- addClassVariable(zonefinder_type, "NXRRSET",
- Py_BuildValue("I", ZoneFinder::NXRRSET));
- addClassVariable(zonefinder_type, "CNAME",
- Py_BuildValue("I", ZoneFinder::CNAME));
- addClassVariable(zonefinder_type, "DNAME",
- Py_BuildValue("I", ZoneFinder::DNAME));
-
- addClassVariable(zonefinder_type, "FIND_DEFAULT",
- Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
- addClassVariable(zonefinder_type, "FIND_GLUE_OK",
- Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
- addClassVariable(zonefinder_type, "FIND_DNSSEC",
- Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
-
+ try {
+ installClassVariable(zonefinder_type, "SUCCESS",
+ Py_BuildValue("I", ZoneFinder::SUCCESS));
+ installClassVariable(zonefinder_type, "DELEGATION",
+ Py_BuildValue("I", ZoneFinder::DELEGATION));
+ installClassVariable(zonefinder_type, "NXDOMAIN",
+ Py_BuildValue("I", ZoneFinder::NXDOMAIN));
+ installClassVariable(zonefinder_type, "NXRRSET",
+ Py_BuildValue("I", ZoneFinder::NXRRSET));
+ installClassVariable(zonefinder_type, "CNAME",
+ Py_BuildValue("I", ZoneFinder::CNAME));
+ installClassVariable(zonefinder_type, "DNAME",
+ Py_BuildValue("I", ZoneFinder::DNAME));
+ installClassVariable(zonefinder_type, "WILDCARD",
+ Py_BuildValue("I", ZoneFinder::WILDCARD));
+ installClassVariable(zonefinder_type, "WILDCARD_NXRRSET",
+ Py_BuildValue("I", ZoneFinder::WILDCARD_NXRRSET));
+
+ installClassVariable(zonefinder_type, "FIND_DEFAULT",
+ Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
+ installClassVariable(zonefinder_type, "FIND_GLUE_OK",
+ Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
+ installClassVariable(zonefinder_type, "FIND_DNSSEC",
+ Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in ZoneFinder initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in ZoneFinder initialization");
+ return (false);
+ }
return (true);
}
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
index 2b47d02..bc8e62c 100644
--- a/src/lib/python/isc/datasrc/finder_inc.cc
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -62,6 +62,10 @@ Search the zone for a given pair of domain name and RR type.\n\
and the code of SUCCESS will be returned.\n\
- If the search name matches a delegation point of DNAME, it returns\n\
the code of DNAME and that DNAME RR.\n\
+- If the result was synthesized by a wildcard match, it returns the\n\
+ code WILDCARD and the synthesized RRset.\n\
+- If the query matched a wildcard name, but not its type, it returns the\n\
+ code WILDCARD_NXRRSET and None.\n\
- If the target is a list, all RRsets under the domain are inserted\n\
there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
instead of normal processing. This is intended to handle ANY query.\n\
@@ -93,4 +97,22 @@ Parameters:\n\
Return Value(s): A tuple of a result code an a FindResult object enclosing\n\
the search result (see above).\n\
";
+
+const char* const ZoneFinder_find_previous_name_doc = "\
+find_previous_name(isc.dns.Name) -> isc.dns.Name\n\
+\n\
+Gets the previous name in the DNSSEC order. This can be used\n\
+to find the correct NSEC records for proving nonexistence\n\
+of domains.\n\
+\n\
+This method does not include under-zone-cut data (glue data).\n\
+\n\
+Raises isc.datasrc.NotImplemented in case the data source backend\n\
+doesn't support DNSSEC or there is no previous name in the zone (NSEC\n\
+records might be missing in the DB, or the queried name is less than\n\
+or equal to the apex).\n\
+\n\
+Raises isc.datasrc.Error for low-level or internal datasource errors\n\
+(like a broken connection to the database or corrupt data in it).\n\
+";
} // unnamed namespace
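
The documentation above covers the new WILDCARD/WILDCARD_NXRRSET result codes
and find_previous_name(). A short sketch of how these might be exercised from
Python, assuming a hypothetical sqlite3 zone database similar to the one used
by the unit tests further below:

    import isc.datasrc
    import isc.dns

    # placeholder database path, for illustration only
    dsc = isc.datasrc.DataSourceClient(
        "sqlite3", '{ "database_file": "/tmp/example.com.sqlite3" }')
    result, finder = dsc.find_zone(isc.dns.Name("example.com"))

    # a name synthesized from a wildcard yields WILDCARD and the RRset;
    # a wildcard match without the queried type yields WILDCARD_NXRRSET, None
    result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
                                isc.dns.RRType.A(), None, finder.FIND_DEFAULT)

    # previous name in DNSSEC order; raises isc.datasrc.NotImplemented when
    # the backend cannot provide it
    prev = finder.find_previous_name(isc.dns.Name("zzz.example.com"))
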
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index 598d300..cb02724 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -103,8 +103,14 @@ namespace {
// The s_* Class simply covers one instantiation of the object
class s_ZoneFinder : public PyObject {
public:
- s_ZoneFinder() : cppobj(ZoneFinderPtr()) {};
+ s_ZoneFinder() : cppobj(ZoneFinderPtr()), base_obj(NULL) {};
ZoneFinderPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor.
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
};
// Shortcut type which would be convenient for adding class variables safely.
@@ -125,6 +131,9 @@ ZoneFinder_destroy(s_ZoneFinder* const self) {
// cppobj is a shared ptr, but to make sure things are not destroyed in
// the wrong order, we reset it here.
self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
Py_TYPE(self)->tp_free(self);
}
@@ -160,6 +169,31 @@ ZoneFinder_find(PyObject* po_self, PyObject* args) {
return (isc_datasrc_internal::ZoneFinder_helper(self->cppobj.get(), args));
}
+PyObject*
+ZoneFinder_findPreviousName(PyObject* po_self, PyObject* args) {
+ s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+ PyObject* name_obj;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ try {
+ return (createNameObject(
+ self->cppobj->findPreviousName(PyName_ToName(name_obj))));
+ } catch (const isc::NotImplemented& nie) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ nie.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
// This list contains the actual set of functions we have in
// python. Each entry has
// 1. Python method name
@@ -167,12 +201,12 @@ ZoneFinder_find(PyObject* po_self, PyObject* args) {
// 3. Argument type
// 4. Documentation
PyMethodDef ZoneFinder_methods[] = {
- { "get_origin", reinterpret_cast<PyCFunction>(ZoneFinder_getOrigin),
- METH_NOARGS, ZoneFinder_getOrigin_doc },
- { "get_class", reinterpret_cast<PyCFunction>(ZoneFinder_getClass),
- METH_NOARGS, ZoneFinder_getClass_doc },
- { "find", reinterpret_cast<PyCFunction>(ZoneFinder_find), METH_VARARGS,
- ZoneFinder_find_doc },
+ { "get_origin", ZoneFinder_getOrigin, METH_NOARGS,
+ ZoneFinder_getOrigin_doc },
+ { "get_class", ZoneFinder_getClass, METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", ZoneFinder_find, METH_VARARGS, ZoneFinder_find_doc },
+ { "find_previous_name", ZoneFinder_findPreviousName, METH_VARARGS,
+ ZoneFinder_find_previous_name_doc },
{ NULL, NULL, 0, NULL }
};
@@ -233,11 +267,15 @@ PyTypeObject zonefinder_type = {
};
PyObject*
-createZoneFinderObject(isc::datasrc::ZoneFinderPtr source) {
+createZoneFinderObject(isc::datasrc::ZoneFinderPtr source, PyObject* base_obj) {
s_ZoneFinder* py_zi = static_cast<s_ZoneFinder*>(
zonefinder_type.tp_alloc(&zonefinder_type, 0));
if (py_zi != NULL) {
py_zi->cppobj = source;
+ py_zi->base_obj = base_obj;
+ }
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
}
return (py_zi);
}
diff --git a/src/lib/python/isc/datasrc/finder_python.h b/src/lib/python/isc/datasrc/finder_python.h
index 5f2404e..23bc457 100644
--- a/src/lib/python/isc/datasrc/finder_python.h
+++ b/src/lib/python/isc/datasrc/finder_python.h
@@ -24,7 +24,15 @@ namespace python {
extern PyTypeObject zonefinder_type;
-PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source);
+/// \brief Create a ZoneFinder python object
+///
+/// \param source The zone finder pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneFinder depends on
+/// Its refcount is increased, and will be decreased when
+/// this zone finder is destroyed, making sure that the
+/// base object is never destroyed before this zone finder.
+PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source,
+ PyObject* base_obj = NULL);
} // namespace python
} // namespace datasrc
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
index b482ea6..c52ab4a 100644
--- a/src/lib/python/isc/datasrc/iterator_python.cc
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -45,8 +45,14 @@ namespace {
// The s_* Class simply covers one instantiation of the object
class s_ZoneIterator : public PyObject {
public:
- s_ZoneIterator() : cppobj(ZoneIteratorPtr()) {};
+ s_ZoneIterator() : cppobj(ZoneIteratorPtr()), base_obj(NULL) {};
ZoneIteratorPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor.
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
};
// Shortcut type which would be convenient for adding class variables safely.
@@ -68,6 +74,9 @@ ZoneIterator_destroy(s_ZoneIterator* const self) {
// cppobj is a shared ptr, but to make sure things are not destroyed in
// the wrong order, we reset it here.
self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
Py_TYPE(self)->tp_free(self);
}
@@ -187,11 +196,17 @@ PyTypeObject zoneiterator_type = {
};
PyObject*
-createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source) {
+createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
+ PyObject* base_obj)
+{
s_ZoneIterator* py_zi = static_cast<s_ZoneIterator*>(
zoneiterator_type.tp_alloc(&zoneiterator_type, 0));
if (py_zi != NULL) {
py_zi->cppobj = source;
+ py_zi->base_obj = base_obj;
+ }
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
}
return (py_zi);
}
diff --git a/src/lib/python/isc/datasrc/iterator_python.h b/src/lib/python/isc/datasrc/iterator_python.h
index b457740..7c1b0eb 100644
--- a/src/lib/python/isc/datasrc/iterator_python.h
+++ b/src/lib/python/isc/datasrc/iterator_python.h
@@ -25,7 +25,15 @@ namespace python {
extern PyTypeObject zoneiterator_type;
-PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source);
+/// \brief Create a ZoneIterator python object
+///
+/// \param source The zone iterator pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneIterator depends on
+/// Its refcount is increased, and will be decreased when
+/// this zone iterator is destroyed, making sure that the
+/// base object is never destroyed before this zone iterator.
+PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
+ PyObject* base_obj = NULL);
} // namespace python
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index be30dfa..411b5cc 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -10,9 +10,14 @@ CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
-LIBRARY_PATH_PLACEHOLDER =
+# We always add one entry: the location of the data source modules.
+# We may want to add an API method for this to the ds factory, but that is
+# out of scope for this ticket.
+LIBRARY_PATH_PLACEHOLDER = $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs:
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 15ceb80..75a0cfb 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -19,14 +19,16 @@ import isc.dns
import unittest
import os
import shutil
+import json
TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
-BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
-NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
+
+READ_ZONE_DB_CONFIG = "{ \"database_file\": \"" + READ_ZONE_DB_FILE + "\" }"
+WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}"
def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
rrset_to_add = isc.dns.RRset(name, rrclass, rrtype, ttl)
@@ -59,13 +61,27 @@ def check_for_rrset(expected_rrsets, rrset):
class DataSrcClient(unittest.TestCase):
- def test_construct(self):
+ def test_constructors(self):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
+ self.assertRaises(TypeError, isc.datasrc.DataSourceClient, 1, "{}")
+ self.assertRaises(TypeError, isc.datasrc.DataSourceClient, "sqlite3", 1)
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "foo", "{}")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "sqlite3", "")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "sqlite3", "{}")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "sqlite3",
+ "{ \"foo\": 1 }")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "memory",
+ "{ \"foo\": 1 }")
def test_iterate(self):
- dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
# for RRSIGS, the TTL's are currently modified. This test should
# start failing when we fix that.
@@ -176,7 +192,7 @@ class DataSrcClient(unittest.TestCase):
self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
def test_find(self):
- dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
@@ -231,6 +247,21 @@ class DataSrcClient(unittest.TestCase):
"cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
rrset.to_text())
+ result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.WILDCARD, result)
+ self.assertEqual("foo.wild.example.com. 3600 IN A 192.0.2.255\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
+ isc.dns.RRType.TXT(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.WILDCARD_NXRRSET, result)
+ self.assertEqual(None, rrset)
+
self.assertRaises(TypeError, finder.find,
"foo",
isc.dns.RRType.A(),
@@ -247,6 +278,24 @@ class DataSrcClient(unittest.TestCase):
None,
"foo")
+ def test_find_previous(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+
+ prev = finder.find_previous_name(isc.dns.Name("bbb.example.com"))
+ self.assertEqual("example.com.", prev.to_text())
+
+ prev = finder.find_previous_name(isc.dns.Name("zzz.example.com"))
+ self.assertEqual("www.example.com.", prev.to_text())
+
+ prev = finder.find_previous_name(prev)
+ self.assertEqual("*.wild.example.com.", prev.to_text())
+
+ self.assertRaises(isc.datasrc.NotImplemented,
+ finder.find_previous_name,
+ isc.dns.Name("com"))
class DataSrcUpdater(unittest.TestCase):
@@ -260,7 +309,7 @@ class DataSrcUpdater(unittest.TestCase):
def test_update_delete_commit(self):
- dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
# first make sure, through a separate finder, that some record exists
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
@@ -333,8 +382,40 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
+ def test_two_modules(self):
+ # load two modules, and check that they don't interfere
+ mem_cfg = { "type": "memory", "class": "IN", "zones": [] }
+ dsc_mem = isc.datasrc.DataSourceClient("memory", json.dumps(mem_cfg))
+ dsc_sql = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+ # check if exceptions are working
+ self.assertRaises(isc.datasrc.Error, isc.datasrc.DataSourceClient,
+ "memory", "{}")
+ self.assertRaises(isc.datasrc.Error, isc.datasrc.DataSourceClient,
+ "sqlite3", "{}")
+
+ # see if a lookup succeeds in sqlite3 ds
+ result, finder = dsc_sql.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ # see if a lookup fails in mem ds
+ result, finder = dsc_mem.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.NXDOMAIN, result)
+
+
def test_update_delete_abort(self):
- dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+ # we don't do anything special with this one, just making sure that
+ # loading two data sources works
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
# first make sure, through a separate finder, that some record exists
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
@@ -383,6 +464,11 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
+ def test_update_for_no_zone(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ self.assertEqual(None,
+ dsc.get_updater(isc.dns.Name("notexistent.example"),
+ True))
if __name__ == "__main__":
isc.log.init("bind10")
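The tests above exercise the new client construction convention: the data source type is given as a string ("sqlite3" or "memory") and the backend-specific configuration as a JSON string. A minimal sketch of the call shape, assuming a sqlite3 zone database at a purely hypothetical path:

    import json
    import isc.datasrc
    import isc.dns

    # Placeholder path, only to illustrate the two-argument constructor.
    cfg = json.dumps({"database_file": "/tmp/example.com.sqlite3"})
    dsc = isc.datasrc.DataSourceClient("sqlite3", cfg)
    result, finder = dsc.find_zone(isc.dns.Name("example.com"))
    if result == finder.SUCCESS:
        result, rrset = finder.find(isc.dns.Name("www.example.com"),
                                    isc.dns.RRType.A(), None,
                                    finder.FIND_DEFAULT)

A bad type string, or a configuration the backend cannot parse, raises isc.datasrc.Error, which is what test_constructors checks.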
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
index a9dc581..e447622 100644
--- a/src/lib/python/isc/datasrc/updater_python.cc
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -54,8 +54,14 @@ namespace {
// The s_* Class simply covers one instantiation of the object
class s_ZoneUpdater : public PyObject {
public:
- s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()) {};
+ s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()), base_obj(NULL) {};
ZoneUpdaterPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor.
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
};
// Shortcut type which would be convenient for adding class variables safely.
@@ -81,6 +87,9 @@ ZoneUpdater_destroy(s_ZoneUpdater* const self) {
// cppobj is a shared ptr, but to make sure things are not destroyed in
// the wrong order, we reset it here.
self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
Py_TYPE(self)->tp_free(self);
}
@@ -176,51 +185,6 @@ ZoneUpdater_find(PyObject* po_self, PyObject* args) {
args));
}
-PyObject*
-AZoneUpdater_find(PyObject* po_self, PyObject* args) {
- s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
- PyObject *name;
- PyObject *rrtype;
- PyObject *target;
- int options_int;
- if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
- &rrtype_type, &rrtype,
- &target, &options_int)) {
- try {
- ZoneFinder::FindOptions options =
- static_cast<ZoneFinder::FindOptions>(options_int);
- ZoneFinder::FindResult find_result(
- self->cppobj->getFinder().find(PyName_ToName(name),
- PyRRType_ToRRType(rrtype),
- NULL,
- options
- ));
- ZoneFinder::Result r = find_result.code;
- isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
- if (rrsp) {
- // Use N instead of O so the refcount isn't increased twice
- return Py_BuildValue("IN", r, createRRsetObject(*rrsp));
- } else {
- return Py_BuildValue("IO", r, Py_None);
- }
- } catch (const DataSourceError& dse) {
- PyErr_SetString(getDataSourceException("Error"), dse.what());
- return (NULL);
- } catch (const std::exception& exc) {
- PyErr_SetString(getDataSourceException("Error"), exc.what());
- return (NULL);
- } catch (...) {
- PyErr_SetString(getDataSourceException("Error"),
- "Unexpected exception");
- return (NULL);
- }
- } else {
- return (NULL);
- }
- return Py_BuildValue("I", 1);
-}
-
-
// This list contains the actual set of functions we have in
// python. Each entry has
// 1. Python method name
@@ -303,12 +267,17 @@ PyTypeObject zoneupdater_type = {
};
PyObject*
-createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source) {
+createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
+ PyObject* base_obj)
+{
s_ZoneUpdater* py_zi = static_cast<s_ZoneUpdater*>(
zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
if (py_zi != NULL) {
py_zi->cppobj = source;
}
+ if (py_zi != NULL && base_obj != NULL) {
+ py_zi->base_obj = base_obj;
+ Py_INCREF(base_obj);
+ }
return (py_zi);
}
diff --git a/src/lib/python/isc/datasrc/updater_python.h b/src/lib/python/isc/datasrc/updater_python.h
index 3886aa3..8228578 100644
--- a/src/lib/python/isc/datasrc/updater_python.h
+++ b/src/lib/python/isc/datasrc/updater_python.h
@@ -26,7 +26,15 @@ namespace python {
extern PyTypeObject zoneupdater_type;
-PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source);
+/// \brief Create a ZoneUpdater python object
+///
+/// \param source The zone updater pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneUpdater depends on.
+/// Its refcount is increased, and will be decreased when
+/// this zone updater is destroyed, making sure that the
+/// base object is never destroyed before this zone updater.
+PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
+ PyObject* base_obj = NULL);
} // namespace python
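The base_obj parameter is about object lifetime on the Python side: the wrapper INCREFs the given object and DECREFs it in its destructor, so the base object cannot be garbage collected while the updater is still alive. A minimal sketch of the guarantee this is meant to provide, assuming the data source client is what gets passed as base_obj when get_updater() creates the wrapper:

    import json
    import isc.datasrc
    import isc.dns

    # Hypothetical writable sqlite3 zone database; the path is a placeholder.
    cfg = json.dumps({"database_file": "/tmp/rwtest.sqlite3"})
    dsc = isc.datasrc.DataSourceClient("sqlite3", cfg)
    updater = dsc.get_updater(isc.dns.Name("example.com"), False)
    del dsc            # the updater keeps a reference to its client ...
    updater.commit()   # ... so it should still be usable afterwards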
diff --git a/src/lib/python/isc/dns/Makefile.am b/src/lib/python/isc/dns/Makefile.am
index 161c2a5..b31da93 100644
--- a/src/lib/python/isc/dns/Makefile.am
+++ b/src/lib/python/isc/dns/Makefile.am
@@ -1,4 +1,5 @@
python_PYTHON = __init__.py
+pythondir = $(pyexecdir)/isc/dns
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index 7a700f9..17452f0 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -185,7 +185,7 @@ init(PyObject*, PyObject* args) {
Py_RETURN_NONE;
}
-// This initialization is for unit tests. It allows message settings to be
+// This initialization is for unit tests. It allows message settings to
// be determined by a set of B10_xxx environment variables. (See the
// description of initLogger() for more details.) The function has been named
// resetUnitTestRootLogger() here as being more descriptive and
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
index b9bc4c8..30f8374 100644
--- a/src/lib/python/isc/log_messages/Makefile.am
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -11,6 +11,7 @@ EXTRA_DIST += zonemgr_messages.py
EXTRA_DIST += cfgmgr_messages.py
EXTRA_DIST += config_messages.py
EXTRA_DIST += notify_out_messages.py
+EXTRA_DIST += libxfrin_messages.py
CLEANFILES = __init__.pyc
CLEANFILES += bind10_messages.pyc
@@ -23,6 +24,7 @@ CLEANFILES += zonemgr_messages.pyc
CLEANFILES += cfgmgr_messages.pyc
CLEANFILES += config_messages.pyc
CLEANFILES += notify_out_messages.pyc
+CLEANFILES += libxfrin_messages.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/log_messages/libxfrin_messages.py b/src/lib/python/isc/log_messages/libxfrin_messages.py
new file mode 100644
index 0000000..74da329
--- /dev/null
+++ b/src/lib/python/isc/log_messages/libxfrin_messages.py
@@ -0,0 +1 @@
+from work.libxfrin_messages import *
diff --git a/src/lib/python/isc/xfrin/Makefile.am b/src/lib/python/isc/xfrin/Makefile.am
new file mode 100644
index 0000000..5804de6
--- /dev/null
+++ b/src/lib/python/isc/xfrin/Makefile.am
@@ -0,0 +1,23 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py diff.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = libxfrin_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.pyc
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py: libxfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libxfrin_messages.mes
+
+pythondir = $(pyexecdir)/isc/xfrin
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/xfrin/__init__.py b/src/lib/python/isc/xfrin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
new file mode 100644
index 0000000..a2d9a7d
--- /dev/null
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -0,0 +1,237 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This helps the xfrin process with accumulating parts of a diff and applying
+it to the data source.
+
+The name of the module is not yet fully decided. We might want to move it
+under isc.datasrc or somewhere else, because we might want to reuse it with
+a future DDNS process. But until then, it lives here.
+"""
+
+import isc.dns
+import isc.log
+from isc.log_messages.libxfrin_messages import *
+
+class NoSuchZone(Exception):
+ """
+ This is raised if a diff for a non-existent zone is being created.
+ """
+ pass
+
+"""
+This is the amount of changes we accumulate before calling Diff.apply
+automatically.
+
+The number 100 is just taken from BIND 9. We don't know the rationale
+for exactly this amount, but we think it is just some randomly chosen
+number.
+"""
+# If changing this, modify the tests accordingly as well.
+DIFF_APPLY_TRESHOLD = 100
+
+logger = isc.log.Logger('libxfrin')
+
+class Diff:
+ """
+ The class represents a diff against the current state of the data source
+ for one zone. The usual way of working with it is creating it, then putting
+ a bunch of changes in and committing at the end.
+
+ If you change your mind, you can just stop using the object without
+ really committing it. In that case no changes will happen in the data
+ source.
+
+ The class works as a kind of buffer as well; it does not direct
+ the changes to the underlying data source right away, but keeps them for
+ a while.
+ """
+ def __init__(self, ds_client, zone, replace=False):
+ """
+ Initializes the diff to a ready state. It checks that the zone exists
+ in the data source and, if not, raises NoSuchZone. This also creates
+ a transaction in the data source.
+
+ The ds_client is the data source client containing the zone. Zone is
+ an isc.dns.Name object representing the name of the zone (its apex).
+ If replace is true, the content of the whole zone is wiped out before
+ applying the diff.
+
+ You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
+ exceptions.
+ """
+ self.__updater = ds_client.get_updater(zone, replace)
+ if self.__updater is None:
+ # The no such zone case
+ raise NoSuchZone("Zone " + str(zone) +
+ " does not exist in the data source " +
+ str(ds_client))
+ self.__buffer = []
+
+ def __check_commited(self):
+ """
+ This checks if the diff is already committed or broken. If it is, it
+ raises ValueError. This check is for methods that need to work only on
+ not-yet-committed diffs.
+ """
+ if self.__updater is None:
+ raise ValueError("The diff is already committed or it has raised " +
+ "an exception, you are too late")
+
+ def __data_common(self, rr, operation):
+ """
+ Schedules an operation with rr.
+
+ It does all the real work of add_data and delete_data, including
+ all checks.
+ """
+ self.__check_commited()
+ if rr.get_rdata_count() != 1:
+ raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
+ 'it holds ' + str(rr.get_rdata_count()))
+ if rr.get_class() != self.__updater.get_class():
+ raise ValueError("The rrset's class " + str(rr.get_class()) +
+ " does not match updater's " +
+ str(self.__updater.get_class()))
+ self.__buffer.append((operation, rr))
+ if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
+ # Time to auto-apply, so the data don't accumulate too much
+ self.apply()
+
+ def add_data(self, rr):
+ """
+ Schedules addition of an RR into the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+ If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'add')
+
+ def delete_data(self, rr):
+ """
+ Schedules deleting an RR from the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+ If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'delete')
+
+ def compact(self):
+ """
+ Tries to compact the operations in buffer a little by putting some of
+ the operations together, forming RRsets with more than one RR.
+
+ This is called by apply before putting the data into the data source. You
+ may, but do not have to, call this manually.
+
+ Currently it merges consecutive same operations on the same
+ domain/type. We could do more fancy things, like sorting by the domain
+ and do more merging, but such diffs should be rare in practice anyway,
+ so we don't bother and do it this simple way.
+ """
+ buf = []
+ for (op, rrset) in self.__buffer:
+ old = buf[-1][1] if len(buf) > 0 else None
+ if old is None or op != buf[-1][0] or \
+ rrset.get_name() != old.get_name() or \
+ rrset.get_type() != old.get_type():
+ buf.append((op, isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())))
+ if rrset.get_ttl() != buf[-1][1].get_ttl():
+ logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(),
+ buf[-1][1].get_ttl())
+ for rdatum in rrset.get_rdata():
+ buf[-1][1].add_rdata(rdatum)
+ self.__buffer = buf
+
+ def apply(self):
+ """
+ Push the buffered changes inside this diff down into the data source.
+ This does not stop you from adding more changes later through this
+ diff and it does not close the datasource transaction, so the changes
+ will not be shown to others yet. It just means the internal memory
+ buffer is flushed.
+
+ This is called from time to time automatically, but you can call it
+ manually if you really want to.
+
+ This raises ValueError if the diff was already committed.
+
+ It also can raise isc.datasrc.Error. If that happens, you should stop
+ using this object and abort the modification.
+ """
+ self.__check_commited()
+ # First, compact the data
+ self.compact()
+ try:
+ # Then pass the data inside the data source
+ for (operation, rrset) in self.__buffer:
+ if operation == 'add':
+ self.__updater.add_rrset(rrset)
+ elif operation == 'delete':
+ self.__updater.delete_rrset(rrset)
+ else:
+ raise ValueError('Unknown operation ' + operation)
+ # As everything is already in, drop the buffer
+ except:
+ # If there's a problem, we can't continue.
+ self.__updater = None
+ raise
+
+ self.__buffer = []
+
+ def commit(self):
+ """
+ Writes all the changes into the data source and makes them visible.
+ This closes the diff; you may not use it any more. If you try to use
+ it, you'll get ValueError.
+
+ This might raise isc.datasrc.Error.
+ """
+ self.__check_commited()
+ # Push the data inside the data source
+ self.apply()
+ # Make sure they are visible.
+ try:
+ self.__updater.commit()
+ finally:
+ # Remove the updater. That frees some resources, and it also marks
+ # this object as already committed, so later operations can detect it.
+
+ # We delete it even in case the commit failed, as that makes us
+ # unusable.
+ self.__updater = None
+
+ def get_buffer(self):
+ """
+ Returns the current buffer of changes not yet passed into the data
+ source. It is in a form like [('add', rrset), ('delete', rrset),
+ ('delete', rrset), ...].
+
+ Probably useful only for testing and introspection purposes. Don't
+ modify the list.
+ """
+ return self.__buffer
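A minimal usage sketch of the Diff class defined above, assuming a sqlite3 data source that contains the zone (the path and record data are placeholders):

    import json
    import isc.datasrc
    import isc.dns
    from isc.xfrin.diff import Diff

    cfg = json.dumps({"database_file": "/tmp/rwtest.sqlite3"})
    dsc = isc.datasrc.DataSourceClient("sqlite3", cfg)

    diff = Diff(dsc, isc.dns.Name("example.com"))
    rr = isc.dns.RRset(isc.dns.Name("www.example.com"), isc.dns.RRClass.IN(),
                       isc.dns.RRType.A(), isc.dns.RRTTL(3600))
    rr.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(), isc.dns.RRClass.IN(),
                               "192.0.2.1"))
    diff.delete_data(rr)   # each RRset passed in must hold exactly one RR
    diff.commit()          # flushes the buffer into the updater and commits

Changes accumulate in an internal buffer and are flushed automatically once DIFF_APPLY_TRESHOLD (100) of them are queued; simply dropping the object without calling commit() leaves the data source untouched.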
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
new file mode 100644
index 0000000..be943c8
--- /dev/null
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -0,0 +1,21 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the libxfrin_messages python module.
+
+% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.
+The xfrin module received an update containing multiple rdata changes for the
+same RRset. But the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten by the earlier one in the sequence.
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
new file mode 100644
index 0000000..416d62b
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -0,0 +1,24 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = diff_tests.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
new file mode 100644
index 0000000..9fab890
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import unittest
+from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
+from isc.xfrin.diff import Diff, NoSuchZone
+
+class TestError(Exception):
+ """
+ Just to have something to be raised during the tests.
+ Not used outside.
+ """
+ pass
+
+class DiffTest(unittest.TestCase):
+ """
+ Tests for the isc.xfrin.diff.Diff class.
+
+ It also plays the role of a data source and an updater, so it can manipulate
+ some test variables while being called.
+ """
+ def setUp(self):
+ """
+ This sets internal variables so we can see nothing was called yet.
+
+ It also creates some variables used in multiple tests.
+ """
+ # Track what was called already
+ self.__updater_requested = False
+ self.__compact_called = False
+ self.__data_operations = []
+ self.__apply_called = False
+ self.__commit_called = False
+ self.__broken_called = False
+ self.__warn_called = False
+ self.__should_replace = False
+ # Some common values
+ self.__rrclass = RRClass.IN()
+ self.__type = RRType.A()
+ self.__ttl = RRTTL(3600)
+ # And RRsets
+ # Create two valid rrsets
+ self.__rrset1 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rdata = Rdata(self.__type, self.__rrclass, '192.0.2.1')
+ self.__rrset1.add_rdata(self.__rdata)
+ self.__rrset2 = RRset(Name('b.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset2.add_rdata(self.__rdata)
+ # And two invalid
+ self.__rrset_empty = RRset(Name('empty.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi = RRset(Name('multi.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi.add_rdata(self.__rdata)
+ self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
+ '192.0.2.2'))
+
+ def __mock_compact(self):
+ """
+ This can be put into the diff to hook into its compact method and see
+ if it gets called.
+ """
+ self.__compact_called = True
+
+ def __mock_apply(self):
+ """
+ This can be put into the diff to hook into its apply method and see
+ it gets called.
+ """
+ self.__apply_called = True
+
+ def __broken_operation(self, *args):
+ """
+ This can be used whenever an operation should fail. It raises TestError.
+ It accepts any number of parameters, so it can be put
+ almost anywhere.
+ """
+ self.__broken_called = True
+ raise TestError("Test error")
+
+ def warn(self, *args):
+ """
+ This is for checking that the warn function was called; we replace the
+ logger in the tested module with this object.
+ """
+ self.__warn_called = True
+
+ def commit(self):
+ """
+ This is part of pretending to be a zone updater. This notes the commit
+ was called.
+ """
+ self.__commit_called = True
+
+ def add_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ that addition of an rrset was requested.
+ """
+ self.__data_operations.append(('add', rrset))
+
+ def delete_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ that removal of an rrset was requested.
+ """
+ self.__data_operations.append(('delete', rrset))
+
+ def get_class(self):
+ """
+ This one is part of pretending to be a zone updater. It returns
+ the IN class.
+ """
+ return self.__rrclass
+
+ def get_updater(self, zone_name, replace):
+ """
+ This one pretends to be the data source client and serves
+ requests for an updater.
+
+ If zone_name is 'none.example.org.', it returns None, otherwise
+ it returns self.
+ """
+ # Check the replace flag matches what the test expects.
+ self.assertEqual(self.__should_replace, replace)
+ self.__updater_requested = True
+ # Pretend this zone doesn't exist
+ if zone_name == Name('none.example.org.'):
+ return None
+ else:
+ return self
+
+ def test_create(self):
+ """
+ This tests the case when the diff is successfully created. It just
+ checks that it does not throw and gets the updater.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.assertTrue(self.__updater_requested)
+ self.assertEqual([], diff.get_buffer())
+
+ def test_create_nonexist(self):
+ """
+ Try to create a diff on a zone that doesn't exist. This should
+ raise a correct exception.
+ """
+ self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
+ self.assertTrue(self.__updater_requested)
+
+ def __data_common(self, diff, method, operation):
+ """
+ Common part of the tests for test_add and test_delete.
+ """
+ # Try putting there the bad data first
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertRaises(ValueError, method, self.__rrset_multi)
+ # They were not added
+ self.assertEqual([], diff.get_buffer())
+ # Put some proper data into the diff
+ method(self.__rrset1)
+ method(self.__rrset2)
+ dlist = [(operation, self.__rrset1), (operation, self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Check the data are not destroyed by raising an exception because of
+ # bad data
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertEqual(dlist, diff.get_buffer())
+
+ def test_add(self):
+ """
+ Try to add a few items into the diff and see they are stored in there.
+
+ Also try passing an rrset whose number of RRs differs from 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.add_data, 'add')
+
+ def test_delete(self):
+ """
+ Try scheduling removal of a few items in the diff and see they are
+ stored in there.
+
+ Also try passing an rrset whose number of RRs differs from 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.delete_data, 'delete')
+
+ def test_apply(self):
+ """
+ Schedule a few changes and check that apply works by passing the
+ data into the updater.
+ """
+ # Prepare the diff
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.delete_data(self.__rrset2)
+ dlist = [('add', self.__rrset1), ('delete', self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Do the apply, hook the compact method
+ diff.compact = self.__mock_compact
+ diff.apply()
+ # It should call the compact
+ self.assertTrue(self.__compact_called)
+ # And pass the data. Our local history of what happened is the same
+ # format, so we can check the same way
+ self.assertEqual(dlist, self.__data_operations)
+ # And the buffer in diff should become empty, as everything
+ # got inside.
+ self.assertEqual([], diff.get_buffer())
+
+ def test_commit(self):
+ """
+ If we call a commit, it should first apply whatever changes are
+ left (we hook into that instead of checking the effect) and then
+ the commit on the updater should have been called.
+
+ Then we check it raises ValueError for whatever operation we try.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ orig_apply = diff.apply
+ diff.apply = self.__mock_apply
+ diff.commit()
+ self.assertTrue(self.__apply_called)
+ self.assertTrue(self.__commit_called)
+ # The data should be handled by apply which we replaced.
+ self.assertEqual([], self.__data_operations)
+ # Now check all range of other methods raise ValueError
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
+ diff.apply = orig_apply
+ self.assertRaises(ValueError, diff.apply)
+ # This one does not state it should raise, so check it doesn't
+ # But it is NOP in this situation anyway
+ diff.compact()
+
+ def test_autoapply(self):
+ """
+ Test that apply is called all by itself after 100 tasks are added.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # A method to check the apply is called _after_ the 100th element
+ # is added. We don't use it anywhere else, so we define it locally
+ # as a local function
+ def check():
+ self.assertEqual(100, len(diff.get_buffer()))
+ self.__mock_apply()
+ orig_apply = diff.apply
+ diff.apply = check
+ # If we put 99, nothing happens yet
+ for i in range(0, 99):
+ diff.add_data(self.__rrset1)
+ expected = [('add', self.__rrset1)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ # Now we push the 100th and it should call the apply method
+ # This will _not_ flush the data yet, as we replaced the method.
+ # It, however, would in the real life.
+ diff.add_data(self.__rrset1)
+ # Now the apply method (which is replaced by our check) should
+ # have been called. If it wasn't, this assert fails. If it was, but
+ # still with 99 elements, the check above would complain
+ self.assertTrue(self.__apply_called)
+ # Reset the buffer by calling the original apply.
+ orig_apply()
+ self.assertEqual([], diff.get_buffer())
+ # Similar with delete
+ self.__apply_called = False
+ for i in range(0, 99):
+ diff.delete_data(self.__rrset2)
+ expected = [('delete', self.__rrset2)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ diff.delete_data(self.__rrset2)
+ self.assertTrue(self.__apply_called)
+
+ def test_compact(self):
+ """
+ Test that the compaction works as expected, i.e. it compacts only consecutive
+ changes of the same operation and on the same domain/type.
+
+ The test case checks that it does merge them, but also puts some
+ different operations "in the middle", changes the type and name and
+ places the same kind of change further away from each other to see they
+ are not merged in that case.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # Check we can do a compact on empty data, it shouldn't break
+ diff.compact()
+ self.assertEqual([], diff.get_buffer())
+ # This data is the way it should look after the compact:
+ # ('operation', 'domain.prefix', 'type', ['rdata', 'rdata'])
+ # The notes say why each pair of consecutive entries can't be merged
+ data = [
+ ('add', 'a', 'A', ['192.0.2.1', '192.0.2.2']),
+ # Different type.
+ ('add', 'a', 'AAAA', ['2001:db8::1', '2001:db8::2']),
+ # Different operation
+ ('delete', 'a', 'AAAA', ['2001:db8::3']),
+ # Different domain
+ ('delete', 'b', 'AAAA', ['2001:db8::4']),
+ # This does not get merged with the first, even if logically
+ # possible. We just don't do this.
+ ('add', 'a', 'A', ['192.0.2.3'])
+ ]
+ # Now, fill the data into the diff, in a "flat" way, one by one
+ for (op, nprefix, rrtype, rdata) in data:
+ name = Name(nprefix + '.example.org.')
+ rrtype_obj = RRType(rrtype)
+ for rdatum in rdata:
+ rrset = RRset(name, self.__rrclass, rrtype_obj, self.__ttl)
+ rrset.add_rdata(Rdata(rrtype_obj, self.__rrclass, rdatum))
+ if op == 'add':
+ diff.add_data(rrset)
+ else:
+ diff.delete_data(rrset)
+ # Compact it
+ diff.compact()
+ # Now check they got compacted. They should be in the same order as
+ # pushed inside. So it should be the same as data, modulo being wrapped
+ # in RRsets and isc.dns objects.
+ def check():
+ buf = diff.get_buffer()
+ self.assertEqual(len(data), len(buf))
+ for (expected, received) in zip(data, buf):
+ (eop, ename, etype, edata) = expected
+ (rop, rrrset) = received
+ self.assertEqual(eop, rop)
+ ename_obj = Name(ename + '.example.org.')
+ self.assertEqual(ename_obj, rrrset.get_name())
+ # We compare the textual form of the type to make sure it prints nicely
+ self.assertEqual(etype, str(rrrset.get_type()))
+ rdata = rrrset.get_rdata()
+ self.assertEqual(len(edata), len(rdata))
+ # It should also preserve the order
+ for (edatum, rdatum) in zip(edata, rdata):
+ self.assertEqual(edatum, str(rdatum))
+ check()
+ # Another compact should do nothing, but must survive
+ diff.compact()
+ check()
+
+ def test_wrong_class(self):
+ """
+ Test a wrong class of rrset is rejected.
+ """
+ diff = Diff(self, Name('example.org.'))
+ rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+ self.__ttl)
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+ self.assertRaises(ValueError, diff.add_data, rrset)
+ self.assertRaises(ValueError, diff.delete_data, rrset)
+
+ def __do_raise_test(self):
+ """
+ Do a raise test. Expects that one of the operations has been exchanged
+ for the broken version.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.delete_data(self.__rrset2)
+ self.assertRaises(TestError, diff.commit)
+ self.assertTrue(self.__broken_called)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset1)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.apply)
+
+ def test_raise_add(self):
+ """
+ Test the exception from add_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.add_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_delete(self):
+ """
+ Test the exception from delete_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.delete_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_commit(self):
+ """
+ Test the exception from updater's commit gets propagated and it can't be
+ used afterwards.
+ """
+ self.commit = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_ttl(self):
+ """
+ Test the TTL handling. The warn function should be called if the TTLs
+ differ, but that's all; it should not crash or raise.
+ """
+ orig_logger = isc.xfrin.diff.logger
+ try:
+ isc.xfrin.diff.logger = self
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(120))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.2'))
+ diff.add_data(rrset2)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(6000))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.3'))
+ diff.add_data(rrset2)
+ # They should get compacted together and complain.
+ diff.compact()
+ self.assertEqual(1, len(diff.get_buffer()))
+ # The TTL stays on the first value, no matter if smaller or bigger
+ # ones come later.
+ self.assertEqual(self.__ttl, diff.get_buffer()[0][1].get_ttl())
+ self.assertTrue(self.__warn_called)
+ finally:
+ isc.xfrin.diff.logger = orig_logger
+
+ def test_replace(self):
+ """
+ Test that when we want to replace the whole zone, it is propagated.
+ """
+ self.__should_replace = True
+ diff = Diff(self, "example.org.", True)
+ self.assertTrue(self.__updater_requested)
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
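The merge rule that test_compact exercises (only consecutive operations with the same name and type are joined) can be shown on plain tuples, independent of isc.dns. This is only an illustration of the condition, not the module API:

    def merge_consecutive(ops):
        """ops is a list of (operation, name, rrtype, [rdata, ...]) tuples."""
        merged = []
        for op, name, rrtype, rdata in ops:
            if merged and merged[-1][0] == (op, name, rrtype):
                merged[-1][1].extend(rdata)      # same op/name/type: join the rdata
            else:
                merged.append([(op, name, rrtype), list(rdata)])
        return merged

    ops = [('add', 'a', 'A', ['192.0.2.1']),
           ('add', 'a', 'A', ['192.0.2.2']),     # joined with the previous entry
           ('delete', 'a', 'A', ['192.0.2.3']),  # different operation, kept apart
           ('add', 'a', 'A', ['192.0.2.4'])]     # not adjacent to the first, kept apart
    assert len(merge_consecutive(ops)) == 3

Diff.compact() additionally keeps the first TTL seen for a merged RRset and logs LIBXFRIN_DIFFERENT_TTL when the TTLs differ, which is what test_ttl checks.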
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index 0ddc9e1..eac12d2 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -983,8 +983,10 @@ public:
client_timer.cancel();
if (outstanding_events_ > 0) {
return;
+#ifndef _MSC_VER
} else {
delete this;
+#endif
}
}
diff --git a/src/lib/resolve/tests/recursive_query_unittest.cc b/src/lib/resolve/tests/recursive_query_unittest.cc
index e9b40ba..01a6428 100644
--- a/src/lib/resolve/tests/recursive_query_unittest.cc
+++ b/src/lib/resolve/tests/recursive_query_unittest.cc
@@ -788,11 +788,11 @@ TEST_F(RecursiveQueryTest, forwardQueryTimeout) {
Question question(Name("example.net"), RRClass::IN(), RRType::A());
OutputBufferPtr buffer(new OutputBuffer(0));
MessagePtr answer(new Message(Message::RENDER));
- Message *query_message = new Message(Message::RENDER);
- isc::resolve::initResponseMessage(question, *query_message);
+ Message query_message(Message::RENDER);
+ isc::resolve::initResponseMessage(question, query_message);
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
- query.forward(ConstMessagePtr(query_message), answer, buffer, &server, callback);
+ query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
io_service_->run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
@@ -823,11 +823,11 @@ TEST_F(RecursiveQueryTest, forwardClientTimeout) {
1000, 10, 4000, 4);
Question q(Name("example.net"), RRClass::IN(), RRType::A());
OutputBufferPtr buffer(new OutputBuffer(0));
- Message *query_message = new Message(Message::RENDER);
- isc::resolve::initResponseMessage(q, *query_message);
+ Message query_message(Message::RENDER);
+ isc::resolve::initResponseMessage(q, query_message);
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
- query.forward(ConstMessagePtr(query_message), answer, buffer, &server, callback);
+ query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
io_service_->run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
@@ -858,11 +858,11 @@ TEST_F(RecursiveQueryTest, forwardLookupTimeout) {
Question question(Name("example.net"), RRClass::IN(), RRType::A());
OutputBufferPtr buffer(new OutputBuffer(0));
- Message *query_message = new Message(Message::RENDER);
- isc::resolve::initResponseMessage(question, *query_message);
+ Message query_message(Message::RENDER);
+ isc::resolve::initResponseMessage(question, query_message);
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
- query.forward(ConstMessagePtr(query_message), answer, buffer, &server, callback);
+ query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
io_service_->run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
@@ -893,11 +893,11 @@ TEST_F(RecursiveQueryTest, lowtimeouts) {
Question question(Name("example.net"), RRClass::IN(), RRType::A());
OutputBufferPtr buffer(new OutputBuffer(0));
- Message *query_message = new Message(Message::RENDER);
- isc::resolve::initResponseMessage(question, *query_message);
+ Message query_message(Message::RENDER);
+ isc::resolve::initResponseMessage(question, query_message);
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
- query.forward(ConstMessagePtr(query_message), answer, buffer, &server, callback);
+ query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
io_service_->run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
diff --git a/src/lib/testutils/Makefile.am b/src/lib/testutils/Makefile.am
index ae5c6da..a511d24 100644
--- a/src/lib/testutils/Makefile.am
+++ b/src/lib/testutils/Makefile.am
@@ -5,7 +5,7 @@ AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS=$(B10_CXXFLAGS)
if HAVE_GTEST
-lib_LTLIBRARIES = libtestutils.la
+noinst_LTLIBRARIES = libtestutils.la
libtestutils_la_SOURCES = srv_test.h srv_test.cc
libtestutils_la_SOURCES += dnsmessage_test.h dnsmessage_test.cc
diff --git a/src/lib/util/pyunittests/Makefile.am b/src/lib/util/pyunittests/Makefile.am
index 63ccf2a..dd2d39a 100644
--- a/src/lib/util/pyunittests/Makefile.am
+++ b/src/lib/util/pyunittests/Makefile.am
@@ -2,7 +2,8 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-pyexec_LTLIBRARIES = pyunittests_util.la
+noinst_LTLIBRARIES = pyunittests_util.la
+
pyunittests_util_la_SOURCES = pyunittests_util.cc
pyunittests_util_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
pyunittests_util_la_LDFLAGS = $(PYTHON_LDFLAGS)
@@ -15,3 +16,7 @@ pyunittests_util_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
pyunittests_util_la_LDFLAGS += -module
pyunittests_util_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
pyunittests_util_la_LIBADD += $(PYTHON_LIB)
+
+# hack to trigger libtool to not create a convenience archive,
+# resulting in shared modules
+pyunittests_util_la_LDFLAGS += -rpath /nowhere
diff --git a/src/lib/util/unittests/Makefile.am b/src/lib/util/unittests/Makefile.am
index 83235f2..bbb0d49 100644
--- a/src/lib/util/unittests/Makefile.am
+++ b/src/lib/util/unittests/Makefile.am
@@ -1,7 +1,7 @@
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CXXFLAGS = $(B10_CXXFLAGS)
-lib_LTLIBRARIES = libutil_unittests.la
+noinst_LTLIBRARIES = libutil_unittests.la
libutil_unittests_la_SOURCES = fork.h fork.cc resolver.h
libutil_unittests_la_SOURCES += newhook.h newhook.cc
libutil_unittests_la_SOURCES += testdata.h testdata.cc
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 6923c41..49ef0f1 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -24,6 +24,10 @@ SYSTEMTESTTOP=..
status=0
n=0
+# TODO: consider consistency with statistics definition in auth.spec
+auth_queries_tcp="\<queries\.tcp\>"
+auth_queries_udp="\<queries\.udp\>"
+
echo "I:Checking b10-auth is working by default ($n)"
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
# perform a simple check on the output (digcomp would be too much for this)
@@ -40,8 +44,8 @@ echo 'Stats show
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# the server should have received 1 UDP and 1 TCP queries (TCP query was
# sent from the server startup script)
-grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<1\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -73,8 +77,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters should have been reset while stop/start.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -97,8 +101,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters shouldn't be reset due to hot-swapping datasource.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<2\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
diff --git a/tests/system/cleanall.sh b/tests/system/cleanall.sh
index 17c3d4a..d23d103 100755
--- a/tests/system/cleanall.sh
+++ b/tests/system/cleanall.sh
@@ -27,7 +27,10 @@ find . -type f \( \
status=0
-for d in `find . -type d -maxdepth 1 -mindepth 1 -print`
+for d in ./.* ./*
do
+ case $d in ./.|./..) continue ;; esac
+ test -d $d || continue
+
test ! -f $d/clean.sh || ( cd $d && sh clean.sh )
done
diff --git a/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj b/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj
index c872971..d6c4585 100755
--- a/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj
+++ b/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -94,14 +94,17 @@ copy spec_config.h.win32 spec_config.h
</PreBuildEvent>
</ItemDefinitionGroup>
<ItemGroup>
+ <ClInclude Include="..\..\..\src\bin\dhcp6\dhcp6.h" />
<ClInclude Include="..\..\..\src\bin\dhcp6\dhcp6_srv.h" />
<ClInclude Include="..\..\..\src\bin\dhcp6\iface_mgr.h" />
+ <ClInclude Include="..\..\..\src\bin\dhcp6\pkt6.h" />
<ClInclude Include="..\..\getopt.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\bin\dhcp6\dhcp6_srv.cc" />
<ClCompile Include="..\..\..\src\bin\dhcp6\iface_mgr.cc" />
<ClCompile Include="..\..\..\src\bin\dhcp6\main.cc" />
+ <ClCompile Include="..\..\..\src\bin\dhcp6\pkt6.cc" />
<ClCompile Include="..\..\getopt.cc" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
diff --git a/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj.filters b/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj.filters
index 62adc75..8ee906c 100755
--- a/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj.filters
+++ b/win32build/VS2010/b10-dhcp6/b10-dhcp6.vcxproj.filters
@@ -24,6 +24,12 @@
<ClInclude Include="..\..\getopt.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="..\..\..\src\bin\dhcp6\dhcp6.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\dhcp6\pkt6.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\bin\dhcp6\dhcp6_srv.cc">
@@ -38,5 +44,8 @@
<ClCompile Include="..\..\getopt.cc">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\..\src\bin\dhcp6\pkt6.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj b/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj
index 1cad2c8..73c04d0 100755
--- a/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj
+++ b/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -52,7 +52,7 @@
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>WIN32;TEST_DATA_DIR="%BIND10HOME%/src/lib/testutils/testdata";TEST_DATA_BUILDDIR="%BIND10HOME%src/bin/dhcp6/tests";_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\bin;..\..\..\ext\asio;..\..\..\..\gtest\include;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<CompileAs>CompileAsCpp</CompileAs>
</ClCompile>
@@ -71,7 +71,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>WIN32;TEST_DATA_DIR="%BIND10HOME%/src/lib/testutils/testdata";TEST_DATA_BUILDDIR="%BIND10HOME%src/bin/dhcp6/tests";NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\bin;..\..\..\ext\asio;..\..\..\..\gtest\include;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<CompileAs>CompileAsCpp</CompileAs>
</ClCompile>
@@ -88,13 +88,16 @@
<ItemGroup>
<ClInclude Include="..\..\..\src\bin\dhcp6\dhcp6_srv.h" />
<ClInclude Include="..\..\..\src\bin\dhcp6\iface_mgr.h" />
+ <ClInclude Include="..\..\..\src\bin\dhcp6\pkt6.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\bin\dhcp6\dhcp6_srv.cc" />
<ClCompile Include="..\..\..\src\bin\dhcp6\iface_mgr.cc" />
+ <ClCompile Include="..\..\..\src\bin\dhcp6\pkt6.cc" />
<ClCompile Include="..\..\..\src\bin\dhcp6\tests\dhcp6_srv_unittest.cc" />
<ClCompile Include="..\..\..\src\bin\dhcp6\tests\dhcp6_unittests.cc" />
<ClCompile Include="..\..\..\src\bin\dhcp6\tests\iface_mgr_unittest.cc" />
+ <ClCompile Include="..\..\..\src\bin\dhcp6\tests\pkt6_unittest.cc" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
diff --git a/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj.filters b/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj.filters
index e08588f..774c898 100755
--- a/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj.filters
+++ b/win32build/VS2010/b10-dhcp6_tests/b10-dhcp6_tests.vcxproj.filters
@@ -21,6 +21,9 @@
<ClInclude Include="..\..\..\src\bin\dhcp6\iface_mgr.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="..\..\..\src\bin\dhcp6\pkt6.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\bin\dhcp6\dhcp6_srv.cc">
@@ -38,5 +41,11 @@
<ClCompile Include="..\..\..\src\bin\dhcp6\tests\iface_mgr_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\..\src\bin\dhcp6\tests\pkt6_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\dhcp6\pkt6.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/bind10.sln b/win32build/VS2010/bind10.sln
index 192fa4f..2a4394a 100755
--- a/win32build/VS2010/bind10.sln
+++ b/win32build/VS2010/bind10.sln
@@ -152,6 +152,9 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libcfgclient_tests", "libcf
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblog", "liblog\liblog.vcxproj", "{AEF3DFFE-B566-4E6A-B299-B59B81022C06}"
+ ProjectSection(ProjectDependencies) = postProject
+ {2844FDFB-A0A1-4FA4-A654-15D69CC717DD} = {2844FDFB-A0A1-4FA4-A654-15D69CC717DD}
+ EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblog_tests", "liblog_tests\liblog_tests.vcxproj", "{9F69DE07-D285-4B5C-8528-DF975C59ED3B}"
ProjectSection(ProjectDependencies) = postProject
@@ -277,7 +280,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libdatasrc_tests", "libdata
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libserver_common", "libserver_common\libserver_common.vcxproj", "{66C9A5EC-514B-4BDC-AC74-ED4CB465CAAF}"
ProjectSection(ProjectDependencies) = postProject
- {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1} = {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1}
+ {32624520-5341-471B-B88D-2599DBCDABF5} = {32624520-5341-471B-B88D-2599DBCDABF5}
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libserver_common_tests", "libserver_common_tests\libserver_common_tests.vcxproj", "{D09B618B-D0E4-468D-A4BD-E204B4344C18}"
@@ -333,7 +336,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pydutil_io", "pydutil_io\py
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libxfr", "libxfr\libxfr.vcxproj", "{761E7D88-6CCB-4E41-9F1E-6C1FBBD062F5}"
ProjectSection(ProjectDependencies) = postProject
- {33927325-C9B5-4FE6-B69F-318433AFF4BD} = {33927325-C9B5-4FE6-B69F-318433AFF4BD}
+ {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1} = {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1}
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libdhcp", "libdhcp\libdhcp.vcxproj", "{F27BC0D0-A334-4DC0-9DC9-880D5DA74524}"
@@ -365,7 +368,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "b10-dhcp6_tests", "b10-dhcp
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libacl", "libacl\libacl.vcxproj", "{635B804D-1B52-433E-9ECD-84F507FDB1F1}"
ProjectSection(ProjectDependencies) = postProject
- {32624520-5341-471B-B88D-2599DBCDABF5} = {32624520-5341-471B-B88D-2599DBCDABF5}
+ {33927325-C9B5-4FE6-B69F-318433AFF4BD} = {33927325-C9B5-4FE6-B69F-318433AFF4BD}
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libdnsacl", "libdnsacl\libdnsacl.vcxproj", "{EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1}"
diff --git a/win32build/VS2010/libbench_tests/libbench_tests.vcxproj b/win32build/VS2010/libbench_tests/libbench_tests.vcxproj
index c2850e6..2cb54d9 100755
--- a/win32build/VS2010/libbench_tests/libbench_tests.vcxproj
+++ b/win32build/VS2010/libbench_tests/libbench_tests.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -60,8 +60,8 @@
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<OutputFile>$(OutDir)run_unittests$(TargetExt)</OutputFile>
- <AdditionalDependencies>libexceptions.lib;libutil.lib;libcryptolink.lib;libdns++.lib;libbench.lib;botan.lib;gtestd.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libbench\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;libcryptolink.lib;libdns++.lib;libbench.lib;botan.lib;gtestd.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libutil_unittests\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libbench\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -81,8 +81,8 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<OutputFile>$(OutDir)run_unittests$(TargetExt)</OutputFile>
- <AdditionalDependencies>libexceptions.lib;libutil.lib;libcryptolink.lib;libdns++.lib;libbench.lib;botan.lib;gtest.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libbench\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;libcryptolink.lib;libdns++.lib;libbench.lib;botan.lib;gtest.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libutil_unittests\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libbench\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
diff --git a/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj b/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj
index ef49cd9..84aae51 100755
--- a/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj
+++ b/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -60,8 +60,8 @@
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<OutputFile>$(OutDir)run_unittests$(TargetExt)</OutputFile>
- <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;liblog.lib;libcc.lib;libcfgclient.lib;log4cplusSD.lib;gtestd.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libutil_unittests\$(Configuration);..\liblog\$(Configuration);..\libcc\$(Configuration);..\libcfgclient\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;liblog.lib;libcc.lib;libcfgclient.lib;libfake_session.lib;log4cplusSD.lib;gtestd.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libutil_unittests\$(Configuration);..\liblog\$(Configuration);..\libcc\$(Configuration);..\libcfgclient\$(Configuration);..\libfake_session\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
<PreBuildEvent>
<Command>cd ..\..\..\src\lib\config\tests
@@ -86,8 +86,8 @@ copy data_def_unittests_config.h.win32 data_def_unittests_config.h
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<OutputFile>$(OutDir)run_unittests$(TargetExt)</OutputFile>
- <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;liblog.lib;libcc.lib;libcfgclient.lib;log4cplusS.lib;gtest.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libutil_unittests\$(Configuration);..\liblog\$(Configuration);..\libcc\$(Configuration);..\libcfgclient\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;liblog.lib;libcc.lib;libcfgclient.lib;libfake_session.lib;log4cplusS.lib;gtest.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\libutil_unittests\$(Configuration);..\liblog\$(Configuration);..\libcc\$(Configuration);..\libcfgclient\$(Configuration);..\libfake_session\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);..\..\..\..\gtest\md10\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
<PreBuildEvent>
<Command>cd ..\..\..\src\lib\config\tests
@@ -97,12 +97,10 @@ copy data_def_unittests_config.h.win32 data_def_unittests_config.h
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\..\src\lib\config\tests\data_def_unittests_config.h" />
- <ClInclude Include="..\..\..\src\lib\config\tests\fake_session.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\lib\config\tests\ccsession_unittests.cc" />
<ClCompile Include="..\..\..\src\lib\config\tests\config_data_unittests.cc" />
- <ClCompile Include="..\..\..\src\lib\config\tests\fake_session.cc" />
<ClCompile Include="..\..\..\src\lib\config\tests\module_spec_unittests.cc" />
<ClCompile Include="..\..\..\src\lib\config\tests\run_unittests.cc" />
</ItemGroup>
diff --git a/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj.filters b/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj.filters
index 392fb3b..1ee90cb 100755
--- a/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj.filters
+++ b/win32build/VS2010/libcfgclient_tests/libcfgclient_tests.vcxproj.filters
@@ -18,9 +18,6 @@
<ClInclude Include="..\..\..\src\lib\config\tests\data_def_unittests_config.h">
<Filter>Header Files</Filter>
</ClInclude>
- <ClInclude Include="..\..\..\src\lib\config\tests\fake_session.h">
- <Filter>Header Files</Filter>
- </ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\lib\config\tests\ccsession_unittests.cc">
@@ -29,9 +26,6 @@
<ClCompile Include="..\..\..\src\lib\config\tests\config_data_unittests.cc">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\..\src\lib\config\tests\fake_session.cc">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\..\src\lib\config\tests\module_spec_unittests.cc">
<Filter>Source Files</Filter>
</ClCompile>
diff --git a/win32build/VS2010/libdatasrc/libdatasrc.vcxproj b/win32build/VS2010/libdatasrc/libdatasrc.vcxproj
index 9342a26..15f0bb1 100755
--- a/win32build/VS2010/libdatasrc/libdatasrc.vcxproj
+++ b/win32build/VS2010/libdatasrc/libdatasrc.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -16,6 +16,7 @@
<ClInclude Include="..\..\..\src\lib\datasrc\database.h" />
<ClInclude Include="..\..\..\src\lib\datasrc\datasrc_messages.h" />
<ClInclude Include="..\..\..\src\lib\datasrc\data_source.h" />
+ <ClInclude Include="..\..\..\src\lib\datasrc\factory.h" />
<ClInclude Include="..\..\..\src\lib\datasrc\iterator.h" />
<ClInclude Include="..\..\..\src\lib\datasrc\logger.h" />
<ClInclude Include="..\..\..\src\lib\datasrc\memory_datasrc.h" />
@@ -33,6 +34,7 @@
<ClCompile Include="..\..\..\src\lib\datasrc\database.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\datasrc_messages.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\data_source.cc" />
+ <ClCompile Include="..\..\..\src\lib\datasrc\factory.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\logger.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\memory_datasrc.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\query.cc" />
diff --git a/win32build/VS2010/libdatasrc/libdatasrc.vcxproj.filters b/win32build/VS2010/libdatasrc/libdatasrc.vcxproj.filters
index d8d01ca..5ada443 100755
--- a/win32build/VS2010/libdatasrc/libdatasrc.vcxproj.filters
+++ b/win32build/VS2010/libdatasrc/libdatasrc.vcxproj.filters
@@ -21,9 +21,6 @@
<ClInclude Include="..\..\..\src\lib\datasrc\data_source.h">
<Filter>Header Files</Filter>
</ClInclude>
- <ClInclude Include="..\..\..\src\lib\datasrc\memory_datasrc.h">
- <Filter>Header Files</Filter>
- </ClInclude>
<ClInclude Include="..\..\..\src\lib\datasrc\query.h">
<Filter>Header Files</Filter>
</ClInclude>
@@ -57,10 +54,16 @@
<ClInclude Include="..\..\..\src\lib\datasrc\logger.h">
<Filter>Header Files</Filter>
</ClInclude>
- <ClInclude Include="..\..\..\src\lib\datasrc\sqlite3_accessor.h">
+ <ClInclude Include="..\..\..\src\lib\datasrc\datasrc_messages.h">
<Filter>Header Files</Filter>
</ClInclude>
- <ClInclude Include="..\..\..\src\lib\datasrc\datasrc_messages.h">
+ <ClInclude Include="..\..\..\src\lib\datasrc\factory.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\lib\datasrc\memory_datasrc.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\lib\datasrc\sqlite3_accessor.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
@@ -71,9 +74,6 @@
<ClCompile Include="..\..\..\src\lib\datasrc\data_source.cc">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\..\src\lib\datasrc\memory_datasrc.cc">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\..\src\lib\datasrc\query.cc">
<Filter>Source Files</Filter>
</ClCompile>
@@ -95,6 +95,12 @@
<ClCompile Include="..\..\..\src\lib\datasrc\logger.cc">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\..\src\lib\datasrc\factory.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\lib\datasrc\memory_datasrc.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="..\..\..\src\lib\datasrc\sqlite3_accessor.cc">
<Filter>Source Files</Filter>
</ClCompile>
diff --git a/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj b/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj
index 04615ff..0a49f23 100755
--- a/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj
+++ b/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -94,6 +94,7 @@
<ClCompile Include="..\..\..\src\lib\datasrc\tests\client_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\tests\database_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\tests\datasrc_unittest.cc" />
+ <ClCompile Include="..\..\..\src\lib\datasrc\tests\factory_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\tests\logger_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\tests\memory_datasrc_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\datasrc\tests\query_unittest.cc" />
diff --git a/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj.filters b/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj.filters
index c7df231..d774595 100755
--- a/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj.filters
+++ b/win32build/VS2010/libdatasrc_tests/libdatasrc_tests.vcxproj.filters
@@ -68,5 +68,8 @@
<ClCompile Include="..\..\..\src\lib\datasrc\tests\sqlite3_accessor_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\..\src\lib\datasrc\tests\factory_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/libdns++/libdns++.vcxproj b/win32build/VS2010/libdns++/libdns++.vcxproj
index 3b92850..7675fd7 100755
--- a/win32build/VS2010/libdns++/libdns++.vcxproj
+++ b/win32build/VS2010/libdns++/libdns++.vcxproj
@@ -24,8 +24,10 @@
<ClInclude Include="..\..\..\src\lib\dns\rdata.h" />
<ClInclude Include="..\..\..\src\lib\dns\rdataclass.h" />
<ClInclude Include="..\..\..\src\lib\dns\rdatafields.h" />
+ <ClInclude Include="..\..\..\src\lib\dns\rdata\generic\detail\ds_like.h" />
<ClInclude Include="..\..\..\src\lib\dns\rdata\generic\detail\nsec_bitmap.h" />
<ClInclude Include="..\..\..\src\lib\dns\rdata\generic\detail\txt_like.h" />
+ <ClInclude Include="..\..\..\src\lib\dns\rdata\generic\dlv_32769.h" />
<ClInclude Include="..\..\..\src\lib\dns\rrclass.h" />
<ClInclude Include="..\..\..\src\lib\dns\rrparamregistry.h" />
<ClInclude Include="..\..\..\src\lib\dns\rrset.h" />
diff --git a/win32build/VS2010/libdns++/libdns++.vcxproj.filters b/win32build/VS2010/libdns++/libdns++.vcxproj.filters
index 1605ec4..bb218b1 100755
--- a/win32build/VS2010/libdns++/libdns++.vcxproj.filters
+++ b/win32build/VS2010/libdns++/libdns++.vcxproj.filters
@@ -90,6 +90,12 @@
<ClInclude Include="..\..\..\src\lib\dns\rdata\generic\detail\txt_like.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="..\..\..\src\lib\dns\rdata\generic\detail\ds_like.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\lib\dns\rdata\generic\dlv_32769.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\lib\dns\rdata\generic\detail\nsec_bitmap.cc">
diff --git a/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj b/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj
index 1764b05..d371fea 100755
--- a/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj
+++ b/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -328,9 +328,10 @@ python %BIND10HOME%/src/lib/util/python/gen_wiredata.py -o tsig_verify9.wire tsi
<ClCompile Include="..\..\..\src\lib\dns\tests\rdatafields_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_afsdb_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_cname_unittest.cc" />
+ <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_dhcid_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_dname_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_dnskey_unittest.cc" />
- <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_ds_unittest.cc" />
+ <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_ds_like_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_hinfo_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_in_aaaa_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_in_a_unittest.cc" />
@@ -349,7 +350,7 @@ python %BIND10HOME%/src/lib/util/python/gen_wiredata.py -o tsig_verify9.wire tsi
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_soa_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_srv_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_tsig_unittest.cc" />
- <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_txt_unittest.cc" />
+ <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_txt_like_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rrclass_unittest.cc" />
<ClCompile Include="..\..\..\src\lib\dns\tests\rrparamregistry_unittest.cc" />
diff --git a/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj.filters b/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj.filters
index cb353be..387c39d 100755
--- a/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj.filters
+++ b/win32build/VS2010/libdns++_tests/libdns++_tests.vcxproj.filters
@@ -56,9 +56,6 @@
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_dnskey_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_ds_unittest.cc">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_in_a_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
@@ -101,9 +98,6 @@
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_tsig_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_txt_unittest.cc">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\..\src\lib\dns\tests\rdata_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
@@ -164,5 +158,14 @@
<ClCompile Include="..\..\..\src\lib\dns\tests\tsigrecord_unittest.cc">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_dhcid_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_ds_like_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\lib\dns\tests\rdata_txt_like_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/pyddatasrc/pyddatasrc.vcxproj b/win32build/VS2010/pyddatasrc/pyddatasrc.vcxproj
index fc34f55..3053d47 100755
--- a/win32build/VS2010/pyddatasrc/pyddatasrc.vcxproj
+++ b/win32build/VS2010/pyddatasrc/pyddatasrc.vcxproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
@@ -62,8 +62,8 @@
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<OutputFile>$(OutDir)datasrc.pyd</OutputFile>
- <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\liblog\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libdns++_python\$(Configuration);..\libdatasrc\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);C:\Python32\libs;C:\sqlite3;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
- <AdditionalDependencies>libexceptions.lib;libutil.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libdns++_python.lib;libdatasrc.lib;botan.lib;log4cplusSD.lib;sqlite3.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\liblog\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libdns++_python\$(Configuration);..\libcc\$(Configuration);..\libdatasrc\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);C:\Python32\libs;C:\sqlite3;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libdns++_python.lib;libcc.lib;libdatasrc.lib;botan.lib;log4cplusSD.lib;sqlite3.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -83,8 +83,8 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<OutputFile>$(OutDir)datasrc.pyd</OutputFile>
- <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\liblog\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libdns++_python\$(Configuration);..\libdatasrc\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);C:\Python32\libs;C:\sqlite3;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
- <AdditionalDependencies>libexceptions.lib;libutil.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libdns++_python.lib;libdatasrc.lib;botan.lib;log4cplusS.lib;sqlite3.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\libexceptions\$(Configuration);..\libutil\$(Configuration);..\liblog\$(Configuration);..\libcryptolink\$(Configuration);..\libdns++\$(Configuration);..\libdns++_python\$(Configuration);..\libcc\$(Configuration);..\libdatasrc\$(Configuration);..\..\..\..\botan\md10\$(Configuration);..\..\..\..\log4cplus\md10\$(Configuration);C:\Python32\libs;C:\sqlite3;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libdns++_python.lib;libcc.lib;libdatasrc.lib;botan.lib;log4cplusS.lib;sqlite3.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>