BIND 10 trac826, updated. 6143a6ef5e3dc5962e5c8b61aeff2574fe08705d add libs server_common python dhcp & statistics
BIND 10 source code commits
bind10-changes at lists.isc.org
Sat Jun 30 23:38:10 UTC 2012
The branch, trac826 has been updated
via 6143a6ef5e3dc5962e5c8b61aeff2574fe08705d (commit)
from 44b16960024565548b7a214ad973f844b35f7832 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 6143a6ef5e3dc5962e5c8b61aeff2574fe08705d
Author: Francis Dupont <fdupont at isc.org>
Date: Sun Jul 1 01:37:26 2012 +0200
add libs server_common python dhcp & statistics
-----------------------------------------------------------------------
Summary of changes:
src/lib/dhcp/Makefile.am | 45 +-
src/lib/dhcp/dhcp4.h | 272 ++--
src/lib/dhcp/dhcp6.h | 9 +
src/lib/dhcp/iface_mgr.cc | 1050 +++++++++++++
src/lib/dhcp/iface_mgr.h | 556 +++++++
src/lib/dhcp/iface_mgr_bsd.cc | 55 +
src/lib/dhcp/iface_mgr_linux.cc | 576 ++++++++
src/lib/dhcp/iface_mgr_sun.cc | 55 +
src/lib/dhcp/libdhcp++.cc | 187 +++
src/lib/dhcp/libdhcp++.h | 96 ++
src/lib/dhcp/libdhcp.cc | 131 --
src/lib/dhcp/libdhcp.h | 83 --
src/lib/dhcp/option.cc | 292 ++--
src/lib/dhcp/option.h | 277 ++--
src/lib/dhcp/option4_addrlst.cc | 143 ++
src/lib/dhcp/option4_addrlst.h | 164 +++
src/lib/dhcp/option6_addrlst.cc | 89 +-
src/lib/dhcp/option6_addrlst.h | 72 +-
src/lib/dhcp/option6_ia.cc | 101 +-
src/lib/dhcp/option6_ia.h | 64 +-
src/lib/dhcp/option6_iaaddr.cc | 89 +-
src/lib/dhcp/option6_iaaddr.h | 57 +-
src/lib/dhcp/pkt4.cc | 213 ++-
src/lib/dhcp/pkt4.h | 398 +++--
src/lib/dhcp/pkt6.cc | 161 +-
src/lib/dhcp/pkt6.h | 247 +++-
src/lib/dhcp/tests/.gitignore | 1 +
src/lib/dhcp/tests/Makefile.am | 64 +-
src/lib/dhcp/tests/iface_mgr_unittest.cc | 951 ++++++++++++
src/lib/dhcp/tests/libdhcp++_unittest.cc | 230 +++
src/lib/dhcp/tests/libdhcp_unittest.cc | 141 --
src/lib/dhcp/tests/option4_addrlst_unittest.cc | 277 ++++
src/lib/dhcp/tests/option6_addrlst_unittest.cc | 69 +-
src/lib/dhcp/tests/option6_ia_unittest.cc | 137 +-
src/lib/dhcp/tests/option6_iaaddr_unittest.cc | 93 +-
src/lib/dhcp/tests/option_unittest.cc | 420 ++++--
src/lib/dhcp/tests/pkt4_unittest.cc | 497 +++++--
src/lib/dhcp/tests/pkt6_unittest.cc | 194 +--
src/lib/python/.gitignore | 1 +
src/lib/python/Makefile.am | 11 +-
src/lib/python/bind10_config.py.in | 37 +-
src/lib/python/bind10_config.py.win32 | 38 +-
src/lib/python/isc/Makefile.am | 2 +-
src/lib/python/isc/acl/Makefile.am | 4 +-
src/lib/python/isc/acl/tests/dns_test.py | 2 +-
src/lib/python/isc/bind10/Makefile.am | 3 +-
src/lib/python/isc/bind10/component.py | 673 +++++++++
src/lib/python/isc/bind10/sockcreator.py | 19 +-
src/lib/python/isc/bind10/socket_cache.py | 302 ++++
src/lib/python/isc/bind10/special_component.py | 133 ++
src/lib/python/isc/bind10/tests/Makefile.am | 3 +-
src/lib/python/isc/bind10/tests/component_test.py | 1071 ++++++++++++++
.../python/isc/bind10/tests/sockcreator_test.py | 12 +-
.../python/isc/bind10/tests/socket_cache_test.py | 396 +++++
src/lib/python/isc/cc/data.py | 8 +
src/lib/python/isc/cc/session.py | 25 +-
src/lib/python/isc/cc/tests/.gitignore | 1 +
src/lib/python/isc/cc/tests/session_test.py | 31 +-
src/lib/python/isc/config/Makefile.am | 1 +
src/lib/python/isc/config/ccsession.py | 162 +-
src/lib/python/isc/config/cfgmgr.py | 110 +-
src/lib/python/isc/config/cfgmgr_messages.mes | 11 +-
src/lib/python/isc/config/config_data.py | 129 +-
src/lib/python/isc/config/config_messages.mes | 16 +-
src/lib/python/isc/config/tests/.gitignore | 1 +
src/lib/python/isc/config/tests/Makefile.am | 1 +
src/lib/python/isc/config/tests/ccsession_test.py | 432 +++++-
src/lib/python/isc/config/tests/cfgmgr_test.py | 330 +++--
.../python/isc/config/tests/config_data_test.py | 150 +-
.../python/isc/config/tests/module_spec_test.py | 16 +-
src/lib/python/isc/datasrc/Makefile.am | 6 +-
src/lib/python/isc/datasrc/client_inc.cc | 83 +-
src/lib/python/isc/datasrc/client_python.cc | 102 +-
src/lib/python/isc/datasrc/datasrc.cc | 59 +-
src/lib/python/isc/datasrc/finder_inc.cc | 151 +-
src/lib/python/isc/datasrc/finder_python.cc | 165 ++-
src/lib/python/isc/datasrc/iterator_inc.cc | 33 +
src/lib/python/isc/datasrc/iterator_python.cc | 35 +-
src/lib/python/isc/datasrc/journal_reader_inc.cc | 80 +
...iterator_python.cc => journal_reader_python.cc} | 101 +-
src/lib/python/isc/datasrc/journal_reader_python.h | 47 +
src/lib/python/isc/datasrc/sqlite3_ds.py | 51 +-
.../isc/datasrc/tests}/.gitignore | 0
src/lib/python/isc/datasrc/tests/Makefile.am | 8 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 608 ++++++--
.../python/isc/datasrc/tests/sqlite3_ds_test.py | 124 +-
.../isc/datasrc/tests/testdata/example.com.sqlite3 | Bin 43008 -> 70656 bytes
.../tests/testdata/new_minor_schema.sqlite3 | Bin 2048 -> 2048 bytes
.../isc}/datasrc/tests/testdata/newschema.sqlite3 | Bin 2048 -> 2048 bytes
.../isc}/datasrc/tests/testdata/oldschema.sqlite3 | Bin 2048 -> 2048 bytes
src/lib/python/isc/datasrc/updater_python.cc | 49 +-
src/lib/python/isc/ddns/Makefile.am | 23 +
src/lib/python/isc/{bind10 => ddns}/__init__.py | 0
src/lib/python/isc/ddns/libddns_messages.mes | 214 +++
src/lib/python/isc/ddns/logger.py | 121 ++
src/lib/python/isc/ddns/session.py | 864 +++++++++++
src/lib/python/isc/ddns/tests/Makefile.am | 28 +
src/lib/python/isc/ddns/tests/session_tests.py | 1544 ++++++++++++++++++++
src/lib/python/isc/ddns/tests/zone_config_tests.py | 159 ++
src/lib/python/isc/ddns/zone_config.py | 102 ++
src/lib/python/isc/log/Makefile.am | 11 +-
src/lib/python/isc/log/log.cc | 217 +--
src/lib/python/isc/log/tests/.gitignore | 1 +
src/lib/python/isc/log/tests/Makefile.am | 3 +
src/lib/python/isc/log/tests/log_test.py | 41 +
src/lib/python/isc/log_messages/Makefile.am | 8 +
src/lib/python/isc/log_messages/dbutil_messages.py | 1 +
src/lib/python/isc/log_messages/ddns_messages.py | 1 +
.../python/isc/log_messages/libddns_messages.py | 1 +
.../isc/log_messages/server_common_messages.py | 1 +
src/lib/python/isc/log_messages/work/.gitignore | 2 +
src/lib/python/isc/log_messages/work/Makefile.am | 2 +-
src/lib/python/isc/notify/notify_out.py | 167 ++-
src/lib/python/isc/notify/notify_out_messages.mes | 39 +-
src/lib/python/isc/notify/tests/.gitignore | 1 +
src/lib/python/isc/notify/tests/Makefile.am | 10 +
src/lib/python/isc/notify/tests/notify_out_test.py | 102 +-
.../isc/notify/tests/testdata/brokentest.sqlite3 | Bin 0 -> 15360 bytes
.../python/isc/notify/tests/testdata/example.com | 10 +
.../python/isc/notify/tests/testdata/example.net | 14 +
.../isc/notify/tests/testdata/multisoa.example | 5 +
.../python/isc/notify/tests/testdata/nons.example | 3 +
.../python/isc/notify/tests/testdata/nosoa.example | 7 +
.../python/isc/notify/tests/testdata/test.sqlite3 | Bin 0 -> 19456 bytes
src/lib/python/isc/server_common/Makefile.am | 25 +
.../isc/{bind10 => server_common}/__init__.py | 0
src/lib/python/isc/server_common/auth_command.py | 90 ++
src/lib/python/isc/server_common/dns_tcp.py | 280 ++++
src/lib/python/isc/server_common/logger.py | 20 +
.../isc/server_common/server_common_messages.mes | 65 +
.../isc/{xfrin => server_common}/tests/Makefile.am | 3 +-
.../python/isc/server_common/tests/dns_tcp_test.py | 246 ++++
.../isc/server_common/tests/tsig_keyring_test.py | 193 +++
src/lib/python/isc/server_common/tsig_keyring.py | 121 ++
src/lib/python/isc/testutils/Makefile.am | 3 +-
src/lib/python/isc/testutils/ccsession_mock.py | 34 +
src/lib/python/isc/testutils/rrset_utils.py | 82 ++
src/lib/python/isc/util/Makefile.am | 2 +-
src/lib/python/isc/util/cio/Makefile.am | 41 +
src/lib/python/isc/util/cio/__init__.py | 3 +
src/lib/python/isc/util/cio/socketsession.py | 26 +
src/lib/python/isc/util/cio/socketsession_inc.cc | 122 ++
.../python/isc/util/cio/socketsession_python.cc | 79 +
.../isc/util/cio/socketsession_python.h} | 39 +-
.../isc/util/cio/socketsessionforwarder_inc.cc | 136 ++
.../isc/util/cio/socketsessionforwarder_python.cc | 309 ++++
.../isc/util/cio/socketsessionforwarder_python.h} | 45 +-
.../isc/util/cio/socketsessionreceiver_inc.cc | 89 ++
.../isc/util/cio/socketsessionreceiver_python.cc | 327 +++++
.../isc/util/cio/socketsessionreceiver_python.h} | 46 +-
.../isc/{config => util/cio}/tests/Makefile.am | 20 +-
.../isc/util/cio/tests/socketsession_test.py | 267 ++++
src/lib/python/isc/xfrin/diff.py | 427 +++++-
src/lib/python/isc/xfrin/libxfrin_messages.mes | 14 +-
src/lib/python/isc/xfrin/tests/Makefile.am | 1 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 663 ++++++++-
src/lib/server_common/.gitignore | 2 +
src/lib/server_common/Makefile.am | 2 +
.../lib/server_common/PARTIAL_PORT_ON_WINDOWS | 0
src/lib/server_common/client.cc | 3 -
src/lib/server_common/client.h | 2 +-
src/lib/server_common/logger.h | 13 +-
src/lib/server_common/portconfig.cc | 66 +-
src/lib/server_common/portconfig.h | 80 +-
src/lib/server_common/server_common_messages.mes | 43 +-
src/lib/server_common/socket_request.cc | 428 ++++++
src/lib/server_common/socket_request.h | 282 ++++
src/lib/server_common/tests/.gitignore | 2 +
src/lib/server_common/tests/Makefile.am | 4 +
src/lib/server_common/tests/client_unittest.cc | 1 +
src/lib/server_common/tests/portconfig_unittest.cc | 185 ++-
.../server_common/tests/socket_requestor_test.cc | 589 ++++++++
src/lib/statistics/Makefile.am | 24 +
src/lib/statistics/counter.cc | 82 ++
src/lib/statistics/counter.h | 69 +
src/lib/statistics/counter_dict.cc | 265 ++++
src/lib/statistics/counter_dict.h | 159 ++
src/lib/statistics/tests/Makefile.am | 50 +
src/lib/statistics/tests/counter_dict_unittest.cc | 174 +++
src/lib/statistics/tests/counter_unittest.cc | 85 ++
.../logger.h => statistics/tests/run_unittests.cc} | 39 +-
NEWS => src/lib/util/io/PARTIAL_PORT_ON_WINDOWS | 0
182 files changed, 21519 insertions(+), 3039 deletions(-)
create mode 100644 src/lib/dhcp/iface_mgr.cc
create mode 100644 src/lib/dhcp/iface_mgr.h
create mode 100644 src/lib/dhcp/iface_mgr_bsd.cc
create mode 100644 src/lib/dhcp/iface_mgr_linux.cc
create mode 100644 src/lib/dhcp/iface_mgr_sun.cc
create mode 100644 src/lib/dhcp/libdhcp++.cc
create mode 100644 src/lib/dhcp/libdhcp++.h
delete mode 100644 src/lib/dhcp/libdhcp.cc
delete mode 100644 src/lib/dhcp/libdhcp.h
create mode 100644 src/lib/dhcp/option4_addrlst.cc
create mode 100644 src/lib/dhcp/option4_addrlst.h
create mode 100644 src/lib/dhcp/tests/.gitignore
create mode 100644 src/lib/dhcp/tests/iface_mgr_unittest.cc
create mode 100644 src/lib/dhcp/tests/libdhcp++_unittest.cc
delete mode 100644 src/lib/dhcp/tests/libdhcp_unittest.cc
create mode 100644 src/lib/dhcp/tests/option4_addrlst_unittest.cc
create mode 100644 src/lib/python/.gitignore
create mode 100644 src/lib/python/isc/bind10/component.py
create mode 100644 src/lib/python/isc/bind10/socket_cache.py
create mode 100644 src/lib/python/isc/bind10/special_component.py
create mode 100644 src/lib/python/isc/bind10/tests/component_test.py
create mode 100644 src/lib/python/isc/bind10/tests/socket_cache_test.py
create mode 100644 src/lib/python/isc/cc/tests/.gitignore
create mode 100644 src/lib/python/isc/config/tests/.gitignore
create mode 100644 src/lib/python/isc/datasrc/journal_reader_inc.cc
copy src/lib/python/isc/datasrc/{iterator_python.cc => journal_reader_python.cc} (68%)
create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.h
copy src/lib/{datasrc/tests/testdata => python/isc/datasrc/tests}/.gitignore (100%)
copy src/lib/{ => python/isc}/datasrc/tests/testdata/new_minor_schema.sqlite3 (100%)
copy src/lib/{ => python/isc}/datasrc/tests/testdata/newschema.sqlite3 (100%)
copy src/lib/{ => python/isc}/datasrc/tests/testdata/oldschema.sqlite3 (100%)
create mode 100644 src/lib/python/isc/ddns/Makefile.am
copy src/lib/python/isc/{bind10 => ddns}/__init__.py (100%)
create mode 100644 src/lib/python/isc/ddns/libddns_messages.mes
create mode 100644 src/lib/python/isc/ddns/logger.py
create mode 100644 src/lib/python/isc/ddns/session.py
create mode 100644 src/lib/python/isc/ddns/tests/Makefile.am
create mode 100644 src/lib/python/isc/ddns/tests/session_tests.py
create mode 100644 src/lib/python/isc/ddns/tests/zone_config_tests.py
create mode 100644 src/lib/python/isc/ddns/zone_config.py
create mode 100644 src/lib/python/isc/log/tests/.gitignore
create mode 100644 src/lib/python/isc/log_messages/dbutil_messages.py
create mode 100644 src/lib/python/isc/log_messages/ddns_messages.py
create mode 100644 src/lib/python/isc/log_messages/libddns_messages.py
create mode 100644 src/lib/python/isc/log_messages/server_common_messages.py
create mode 100644 src/lib/python/isc/log_messages/work/.gitignore
create mode 100644 src/lib/python/isc/notify/tests/.gitignore
create mode 100644 src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
create mode 100644 src/lib/python/isc/notify/tests/testdata/example.com
create mode 100644 src/lib/python/isc/notify/tests/testdata/example.net
create mode 100644 src/lib/python/isc/notify/tests/testdata/multisoa.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/nons.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/nosoa.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/test.sqlite3
create mode 100644 src/lib/python/isc/server_common/Makefile.am
copy src/lib/python/isc/{bind10 => server_common}/__init__.py (100%)
create mode 100644 src/lib/python/isc/server_common/auth_command.py
create mode 100644 src/lib/python/isc/server_common/dns_tcp.py
create mode 100644 src/lib/python/isc/server_common/logger.py
create mode 100644 src/lib/python/isc/server_common/server_common_messages.mes
copy src/lib/python/isc/{xfrin => server_common}/tests/Makefile.am (92%)
create mode 100644 src/lib/python/isc/server_common/tests/dns_tcp_test.py
create mode 100644 src/lib/python/isc/server_common/tests/tsig_keyring_test.py
create mode 100644 src/lib/python/isc/server_common/tsig_keyring.py
create mode 100644 src/lib/python/isc/testutils/ccsession_mock.py
create mode 100644 src/lib/python/isc/testutils/rrset_utils.py
create mode 100644 src/lib/python/isc/util/cio/Makefile.am
create mode 100644 src/lib/python/isc/util/cio/__init__.py
create mode 100644 src/lib/python/isc/util/cio/socketsession.py
create mode 100644 src/lib/python/isc/util/cio/socketsession_inc.cc
create mode 100644 src/lib/python/isc/util/cio/socketsession_python.cc
copy src/lib/{server_common/logger.h => python/isc/util/cio/socketsession_python.h} (56%)
create mode 100644 src/lib/python/isc/util/cio/socketsessionforwarder_inc.cc
create mode 100644 src/lib/python/isc/util/cio/socketsessionforwarder_python.cc
copy src/lib/{server_common/logger.h => python/isc/util/cio/socketsessionforwarder_python.h} (55%)
create mode 100644 src/lib/python/isc/util/cio/socketsessionreceiver_inc.cc
create mode 100644 src/lib/python/isc/util/cio/socketsessionreceiver_python.cc
copy src/lib/{server_common/logger.h => python/isc/util/cio/socketsessionreceiver_python.h} (55%)
copy src/lib/python/isc/{config => util/cio}/tests/Makefile.am (60%)
create mode 100644 src/lib/python/isc/util/cio/tests/socketsession_test.py
create mode 100644 src/lib/server_common/.gitignore
copy NEWS => src/lib/server_common/PARTIAL_PORT_ON_WINDOWS (100%)
create mode 100644 src/lib/server_common/socket_request.cc
create mode 100644 src/lib/server_common/socket_request.h
create mode 100644 src/lib/server_common/tests/.gitignore
create mode 100644 src/lib/server_common/tests/socket_requestor_test.cc
create mode 100644 src/lib/statistics/Makefile.am
create mode 100644 src/lib/statistics/counter.cc
create mode 100644 src/lib/statistics/counter.h
create mode 100644 src/lib/statistics/counter_dict.cc
create mode 100644 src/lib/statistics/counter_dict.h
create mode 100644 src/lib/statistics/tests/Makefile.am
create mode 100644 src/lib/statistics/tests/counter_dict_unittest.cc
create mode 100644 src/lib/statistics/tests/counter_unittest.cc
copy src/lib/{server_common/logger.h => statistics/tests/run_unittests.cc} (50%)
copy NEWS => src/lib/util/io/PARTIAL_PORT_ON_WINDOWS (100%)
-----------------------------------------------------------------------
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
index 72317ab..ec169a5 100644
--- a/src/lib/dhcp/Makefile.am
+++ b/src/lib/dhcp/Makefile.am
@@ -5,22 +5,41 @@ AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
CLEANFILES = *.gcno *.gcda
-lib_LTLIBRARIES = libdhcp.la
-libdhcp_la_SOURCES =
-libdhcp_la_SOURCES += libdhcp.cc libdhcp.h
-libdhcp_la_SOURCES += option.cc option.h
-libdhcp_la_SOURCES += option6_ia.cc option6_ia.h
-libdhcp_la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
-libdhcp_la_SOURCES += option6_addrlst.cc option6_addrlst.h
-libdhcp_la_SOURCES += dhcp6.h
-libdhcp_la_SOURCES += pkt6.cc pkt6.h
-libdhcp_la_SOURCES += pkt4.cc pkt4.h
+lib_LTLIBRARIES = libdhcp++.la
+libdhcp___la_SOURCES =
+libdhcp___la_SOURCES += libdhcp++.cc libdhcp++.h
+libdhcp___la_SOURCES += iface_mgr.cc iface_mgr.h
+libdhcp___la_SOURCES += iface_mgr_linux.cc
+libdhcp___la_SOURCES += iface_mgr_bsd.cc
+libdhcp___la_SOURCES += iface_mgr_sun.cc
+libdhcp___la_SOURCES += option.cc option.h
+libdhcp___la_SOURCES += option6_ia.cc option6_ia.h
+libdhcp___la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
+libdhcp___la_SOURCES += option6_addrlst.cc option6_addrlst.h
+libdhcp___la_SOURCES += option4_addrlst.cc option4_addrlst.h
+libdhcp___la_SOURCES += dhcp6.h dhcp4.h
+libdhcp___la_SOURCES += pkt6.cc pkt6.h
+libdhcp___la_SOURCES += pkt4.cc pkt4.h
EXTRA_DIST = README
#EXTRA_DIST += log_messages.mes
-libdhcp_la_CXXFLAGS = $(AM_CXXFLAGS)
-libdhcp_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
-libdhcp_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
+libdhcp___la_CXXFLAGS = $(AM_CXXFLAGS)
+libdhcp___la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
+libdhcp___la_LIBADD = $(top_builddir)/src/lib/asiolink/libasiolink.la
+libdhcp___la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
+libdhcp___la_LDFLAGS = -no-undefined -version-info 2:0:0
+
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+libdhcp___la_CXXFLAGS += -Wno-unused-parameter
+endif
diff --git a/src/lib/dhcp/dhcp4.h b/src/lib/dhcp/dhcp4.h
index 854dca6..e058960 100644
--- a/src/lib/dhcp/dhcp4.h
+++ b/src/lib/dhcp/dhcp4.h
@@ -1,7 +1,3 @@
-/* dhcp.h
-
- Protocol structures... */
-
/*
* Copyright (c) 2004-2011 by Internet Systems Consortium, Inc. ("ISC")
* Copyright (c) 1995-2003 by Internet Software Consortium
@@ -30,7 +26,7 @@
* To learn more about Vixie Enterprises, see ``http://www.vix.com''.
*/
-/*
+/*
* NOTE: This files is imported from ISC DHCP. It uses C notation.
* Format kept for easier merge.
*/
@@ -38,154 +34,158 @@
#ifndef DHCP_H
#define DHCP_H
-#define DHCP_UDP_OVERHEAD (20 + /* IP header */ \
- 8) /* UDP header */
-#define DHCP_SNAME_LEN 64
-#define DHCP_FILE_LEN 128
-#define DHCP_FIXED_NON_UDP 236
-#define DHCP_FIXED_LEN (DHCP_FIXED_NON_UDP + DHCP_UDP_OVERHEAD)
- /* Everything but options. */
-#define BOOTP_MIN_LEN 300
+#include <stdint.h>
-#define DHCP_MTU_MAX 1500
-#define DHCP_MTU_MIN 576
-
-#define DHCP_MAX_OPTION_LEN (DHCP_MTU_MAX - DHCP_FIXED_LEN)
-#define DHCP_MIN_OPTION_LEN (DHCP_MTU_MIN - DHCP_FIXED_LEN)
+namespace isc {
+namespace dhcp {
/* BOOTP (rfc951) message types */
-#define BOOTREQUEST 1
-#define BOOTREPLY 2
+enum BOOTPTypes {
+ BOOTREQUEST = 1,
+ BOOTREPLY = 2
+};
/* Possible values for flags field... */
-#define BOOTP_BROADCAST 32768L
+static const uint16_t BOOTP_BROADCAST = 32768L;
/* Possible values for hardware type (htype) field... */
-#define HTYPE_ETHER 1 /* Ethernet 10Mbps */
-#define HTYPE_IEEE802 6 /* IEEE 802.2 Token Ring... */
-#define HTYPE_FDDI 8 /* FDDI... */
-
-/* Magic cookie validating dhcp options field (and bootp vendor
- extensions field). */
-#define DHCP_OPTIONS_COOKIE "\143\202\123\143"
+enum HType {
+ HTYPE_ETHER = 1, /* Ethernet 10Mbps */
+ HTYPE_IEEE802 = 6, /* IEEE 802.2 Token Ring */
+ HTYPE_FDDI = 8 /* FDDI */
+ /// TODO Add infiniband here
+};
/* DHCP Option codes: */
-
-#define DHO_PAD 0
-#define DHO_SUBNET_MASK 1
-#define DHO_TIME_OFFSET 2
-#define DHO_ROUTERS 3
-#define DHO_TIME_SERVERS 4
-#define DHO_NAME_SERVERS 5
-#define DHO_DOMAIN_NAME_SERVERS 6
-#define DHO_LOG_SERVERS 7
-#define DHO_COOKIE_SERVERS 8
-#define DHO_LPR_SERVERS 9
-#define DHO_IMPRESS_SERVERS 10
-#define DHO_RESOURCE_LOCATION_SERVERS 11
-#define DHO_HOST_NAME 12
-#define DHO_BOOT_SIZE 13
-#define DHO_MERIT_DUMP 14
-#define DHO_DOMAIN_NAME 15
-#define DHO_SWAP_SERVER 16
-#define DHO_ROOT_PATH 17
-#define DHO_EXTENSIONS_PATH 18
-#define DHO_IP_FORWARDING 19
-#define DHO_NON_LOCAL_SOURCE_ROUTING 20
-#define DHO_POLICY_FILTER 21
-#define DHO_MAX_DGRAM_REASSEMBLY 22
-#define DHO_DEFAULT_IP_TTL 23
-#define DHO_PATH_MTU_AGING_TIMEOUT 24
-#define DHO_PATH_MTU_PLATEAU_TABLE 25
-#define DHO_INTERFACE_MTU 26
-#define DHO_ALL_SUBNETS_LOCAL 27
-#define DHO_BROADCAST_ADDRESS 28
-#define DHO_PERFORM_MASK_DISCOVERY 29
-#define DHO_MASK_SUPPLIER 30
-#define DHO_ROUTER_DISCOVERY 31
-#define DHO_ROUTER_SOLICITATION_ADDRESS 32
-#define DHO_STATIC_ROUTES 33
-#define DHO_TRAILER_ENCAPSULATION 34
-#define DHO_ARP_CACHE_TIMEOUT 35
-#define DHO_IEEE802_3_ENCAPSULATION 36
-#define DHO_DEFAULT_TCP_TTL 37
-#define DHO_TCP_KEEPALIVE_INTERVAL 38
-#define DHO_TCP_KEEPALIVE_GARBAGE 39
-#define DHO_NIS_DOMAIN 40
-#define DHO_NIS_SERVERS 41
-#define DHO_NTP_SERVERS 42
-#define DHO_VENDOR_ENCAPSULATED_OPTIONS 43
-#define DHO_NETBIOS_NAME_SERVERS 44
-#define DHO_NETBIOS_DD_SERVER 45
-#define DHO_NETBIOS_NODE_TYPE 46
-#define DHO_NETBIOS_SCOPE 47
-#define DHO_FONT_SERVERS 48
-#define DHO_X_DISPLAY_MANAGER 49
-#define DHO_DHCP_REQUESTED_ADDRESS 50
-#define DHO_DHCP_LEASE_TIME 51
-#define DHO_DHCP_OPTION_OVERLOAD 52
-#define DHO_DHCP_MESSAGE_TYPE 53
-#define DHO_DHCP_SERVER_IDENTIFIER 54
-#define DHO_DHCP_PARAMETER_REQUEST_LIST 55
-#define DHO_DHCP_MESSAGE 56
-#define DHO_DHCP_MAX_MESSAGE_SIZE 57
-#define DHO_DHCP_RENEWAL_TIME 58
-#define DHO_DHCP_REBINDING_TIME 59
-#define DHO_VENDOR_CLASS_IDENTIFIER 60
-#define DHO_DHCP_CLIENT_IDENTIFIER 61
-#define DHO_NWIP_DOMAIN_NAME 62
-#define DHO_NWIP_SUBOPTIONS 63
-#define DHO_USER_CLASS 77
-#define DHO_FQDN 81
-#define DHO_DHCP_AGENT_OPTIONS 82
-#define DHO_AUTHENTICATE 90 /* RFC3118, was 210 */
-#define DHO_CLIENT_LAST_TRANSACTION_TIME 91
-#define DHO_ASSOCIATED_IP 92
-#define DHO_SUBNET_SELECTION 118 /* RFC3011! */
-#define DHO_DOMAIN_SEARCH 119 /* RFC3397 */
-#define DHO_VIVCO_SUBOPTIONS 124
-#define DHO_VIVSO_SUBOPTIONS 125
-
-#define DHO_END 255
+enum DHCPOptionType {
+ DHO_PAD = 0,
+ DHO_SUBNET_MASK = 1,
+ DHO_TIME_OFFSET = 2,
+ DHO_ROUTERS = 3,
+ DHO_TIME_SERVERS = 4,
+ DHO_NAME_SERVERS = 5,
+ DHO_DOMAIN_NAME_SERVERS = 6,
+ DHO_LOG_SERVERS = 7,
+ DHO_COOKIE_SERVERS = 8,
+ DHO_LPR_SERVERS = 9,
+ DHO_IMPRESS_SERVERS = 10,
+ DHO_RESOURCE_LOCATION_SERVERS = 11,
+ DHO_HOST_NAME = 12,
+ DHO_BOOT_SIZE = 13,
+ DHO_MERIT_DUMP = 14,
+ DHO_DOMAIN_NAME = 15,
+ DHO_SWAP_SERVER = 16,
+ DHO_ROOT_PATH = 17,
+ DHO_EXTENSIONS_PATH = 18,
+ DHO_IP_FORWARDING = 19,
+ DHO_NON_LOCAL_SOURCE_ROUTING = 20,
+ DHO_POLICY_FILTER = 21,
+ DHO_MAX_DGRAM_REASSEMBLY = 22,
+ DHO_DEFAULT_IP_TTL = 23,
+ DHO_PATH_MTU_AGING_TIMEOUT = 24,
+ DHO_PATH_MTU_PLATEAU_TABLE = 25,
+ DHO_INTERFACE_MTU = 26,
+ DHO_ALL_SUBNETS_LOCAL = 27,
+ DHO_BROADCAST_ADDRESS = 28,
+ DHO_PERFORM_MASK_DISCOVERY = 29,
+ DHO_MASK_SUPPLIER = 30,
+ DHO_ROUTER_DISCOVERY = 31,
+ DHO_ROUTER_SOLICITATION_ADDRESS = 32,
+ DHO_STATIC_ROUTES = 33,
+ DHO_TRAILER_ENCAPSULATION = 34,
+ DHO_ARP_CACHE_TIMEOUT = 35,
+ DHO_IEEE802_3_ENCAPSULATION = 36,
+ DHO_DEFAULT_TCP_TTL = 37,
+ DHO_TCP_KEEPALIVE_INTERVAL = 38,
+ DHO_TCP_KEEPALIVE_GARBAGE = 39,
+ DHO_NIS_DOMAIN = 40,
+ DHO_NIS_SERVERS = 41,
+ DHO_NTP_SERVERS = 42,
+ DHO_VENDOR_ENCAPSULATED_OPTIONS = 43,
+ DHO_NETBIOS_NAME_SERVERS = 44,
+ DHO_NETBIOS_DD_SERVER = 45,
+ DHO_NETBIOS_NODE_TYPE = 46,
+ DHO_NETBIOS_SCOPE = 47,
+ DHO_FONT_SERVERS = 48,
+ DHO_X_DISPLAY_MANAGER = 49,
+ DHO_DHCP_REQUESTED_ADDRESS = 50,
+ DHO_DHCP_LEASE_TIME = 51,
+ DHO_DHCP_OPTION_OVERLOAD = 52,
+ DHO_DHCP_MESSAGE_TYPE = 53,
+ DHO_DHCP_SERVER_IDENTIFIER = 54,
+ DHO_DHCP_PARAMETER_REQUEST_LIST = 55,
+ DHO_DHCP_MESSAGE = 56,
+ DHO_DHCP_MAX_MESSAGE_SIZE = 57,
+ DHO_DHCP_RENEWAL_TIME = 58,
+ DHO_DHCP_REBINDING_TIME = 59,
+ DHO_VENDOR_CLASS_IDENTIFIER = 60,
+ DHO_DHCP_CLIENT_IDENTIFIER = 61,
+ DHO_NWIP_DOMAIN_NAME = 62,
+ DHO_NWIP_SUBOPTIONS = 63,
+ DHO_USER_CLASS = 77,
+ DHO_FQDN = 81,
+ DHO_DHCP_AGENT_OPTIONS = 82,
+ DHO_AUTHENTICATE = 90, /* RFC3118, was 210 */
+ DHO_CLIENT_LAST_TRANSACTION_TIME = 91,
+ DHO_ASSOCIATED_IP = 92,
+ DHO_SUBNET_SELECTION = 118, /* RFC3011! */
+ DHO_DOMAIN_SEARCH = 119, /* RFC3397 */
+ DHO_VIVCO_SUBOPTIONS = 124,
+ DHO_VIVSO_SUBOPTIONS = 125,
+
+ DHO_END = 255
+};
/* DHCP message types. */
-#define DHCPDISCOVER 1
-#define DHCPOFFER 2
-#define DHCPREQUEST 3
-#define DHCPDECLINE 4
-#define DHCPACK 5
-#define DHCPNAK 6
-#define DHCPRELEASE 7
-#define DHCPINFORM 8
-#define DHCPLEASEQUERY 10
-#define DHCPLEASEUNASSIGNED 11
-#define DHCPLEASEUNKNOWN 12
-#define DHCPLEASEACTIVE 13
-
-
+enum DHCPMessageType {
+ DHCPDISCOVER = 1,
+ DHCPOFFER = 2,
+ DHCPREQUEST = 3,
+ DHCPDECLINE = 4,
+ DHCPACK = 5,
+ DHCPNAK = 6,
+ DHCPRELEASE = 7,
+ DHCPINFORM = 8,
+ DHCPLEASEQUERY = 10,
+ DHCPLEASEUNASSIGNED = 11,
+ DHCPLEASEUNKNOWN = 12,
+ DHCPLEASEACTIVE = 13
+};
+
+static const uint16_t DHCP4_CLIENT_PORT = 68;
+static const uint16_t DHCP4_SERVER_PORT = 67;
+
+/// Magic cookie validating dhcp options field (and bootp vendor
+/// extensions field).
+static const uint32_t DHCP_OPTIONS_COOKIE = 0x63825363;
+
+// TODO: The following are leftovers from the dhcp.h import from ISC DHCP.
+// They will be converted to C++-style defines once they start
+// to be used.
+#if 0
/* Relay Agent Information option subtypes: */
-#define RAI_CIRCUIT_ID 1
-#define RAI_REMOTE_ID 2
-#define RAI_AGENT_ID 3
-#define RAI_LINK_SELECT 5
+#define RAI_CIRCUIT_ID 1
+#define RAI_REMOTE_ID 2
+#define RAI_AGENT_ID 3
+#define RAI_LINK_SELECT 5
/* FQDN suboptions: */
-#define FQDN_NO_CLIENT_UPDATE 1
-#define FQDN_SERVER_UPDATE 2
-#define FQDN_ENCODED 3
-#define FQDN_RCODE1 4
-#define FQDN_RCODE2 5
-#define FQDN_HOSTNAME 6
-#define FQDN_DOMAINNAME 7
-#define FQDN_FQDN 8
-#define FQDN_SUBOPTION_COUNT 8
+#define FQDN_NO_CLIENT_UPDATE 1
+#define FQDN_SERVER_UPDATE 2
+#define FQDN_ENCODED 3
+#define FQDN_RCODE1 4
+#define FQDN_RCODE2 5
+#define FQDN_HOSTNAME 6
+#define FQDN_DOMAINNAME 7
+#define FQDN_FQDN 8
+#define FQDN_SUBOPTION_COUNT 8
/* Enterprise Suboptions: */
-#define VENDOR_ISC_SUBOPTIONS 2495
+#define VENDOR_ISC_SUBOPTIONS 2495
-#define DHCP4_CLIENT_PORT 68
-#define DHCP4_SERVER_PORT 67
+#endif
+} // end of isc::dhcp namespace
+} // end of isc namespace
#endif /* DHCP_H */
-
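
For reference, a minimal sketch (not part of this commit) of how the message-type constants, now exposed as C++ enums in the isc::dhcp namespace by dhcp4.h, might be consumed; the helper function below is purely illustrative:

    #include <dhcp/dhcp4.h>
    #include <string>

    using namespace isc::dhcp;

    // Illustrative only: map a DHCPv4 message type to a printable name.
    std::string messageTypeToText(DHCPMessageType type) {
        switch (type) {
        case DHCPDISCOVER: return ("DISCOVER");
        case DHCPOFFER:    return ("OFFER");
        case DHCPREQUEST:  return ("REQUEST");
        case DHCPDECLINE:  return ("DECLINE");
        case DHCPACK:      return ("ACK");
        case DHCPNAK:      return ("NAK");
        case DHCPRELEASE:  return ("RELEASE");
        case DHCPINFORM:   return ("INFORM");
        default:           return ("UNKNOWN");
        }
    }
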
diff --git a/src/lib/dhcp/dhcp6.h b/src/lib/dhcp/dhcp6.h
index 6012003..15c306d 100644
--- a/src/lib/dhcp/dhcp6.h
+++ b/src/lib/dhcp/dhcp6.h
@@ -108,6 +108,15 @@ extern const int dhcpv6_type_name_max;
#define DUID_LLT 1
#define DUID_EN 2
#define DUID_LL 3
+#define DUID_UUID 4
+
+// Define hardware types
+// Taken from http://www.iana.org/assignments/arp-parameters/
+#define HWTYPE_ETHERNET 0x0001
+#define HWTYPE_INIFINIBAND 0x0020
+
+// Taken from http://www.iana.org/assignments/enterprise-numbers
+#define ENTERPRISE_ID_ISC 2495
/* Offsets into IA_*'s where Option spaces commence. */
#define IA_NA_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
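
Similarly, a small sketch (not part of this commit) showing the DUID type codes defined in dhcp6.h, including the newly added DUID_UUID, being dispatched on; the function name is hypothetical:

    #include <dhcp/dhcp6.h>
    #include <string>

    // Illustrative only: name the DUID type carried in a client/server identifier.
    std::string duidTypeToText(int duid_type) {
        switch (duid_type) {
        case DUID_LLT:  return ("DUID-LLT");   // link-layer address plus time
        case DUID_EN:   return ("DUID-EN");    // enterprise number based
        case DUID_LL:   return ("DUID-LL");    // link-layer address
        case DUID_UUID: return ("DUID-UUID");  // newly added UUID-based type
        default:        return ("unknown");
        }
    }
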
diff --git a/src/lib/dhcp/iface_mgr.cc b/src/lib/dhcp/iface_mgr.cc
new file mode 100644
index 0000000..508413d
--- /dev/null
+++ b/src/lib/dhcp/iface_mgr.cc
@@ -0,0 +1,1050 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <sstream>
+#include <fstream>
+#include <string.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <sys/select.h>
+
+#include <dhcp/dhcp4.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/iface_mgr.h>
+#include <exceptions/exceptions.h>
+#include <util/io/pktinfo_utilities.h>
+
+using namespace std;
+using namespace isc::asiolink;
+using namespace isc::util::io::internal;
+
+namespace isc {
+namespace dhcp {
+
+/// IfaceMgr is a singleton implementation
+IfaceMgr* IfaceMgr::instance_ = 0;
+
+void
+IfaceMgr::instanceCreate() {
+ if (instance_) {
+ // no need to do anything. Instance is already created.
+ // Who called it again anyway? Uh oh. Had to be us, as
+ // this is a private method.
+ return;
+ }
+ instance_ = new IfaceMgr();
+}
+
+IfaceMgr&
+IfaceMgr::instance() {
+ if (instance_ == 0) {
+ instanceCreate();
+ }
+ return (*instance_);
+}
+
+IfaceMgr::Iface::Iface(const std::string& name, int ifindex)
+ :name_(name), ifindex_(ifindex), mac_len_(0), hardware_type_(0),
+ flag_loopback_(false), flag_up_(false), flag_running_(false),
+ flag_multicast_(false), flag_broadcast_(false), flags_(0)
+{
+ memset(mac_, 0, sizeof(mac_));
+}
+
+std::string
+IfaceMgr::Iface::getFullName() const {
+ ostringstream tmp;
+ tmp << name_ << "/" << ifindex_;
+ return (tmp.str());
+}
+
+std::string
+IfaceMgr::Iface::getPlainMac() const {
+ ostringstream tmp;
+ tmp.fill('0');
+ tmp << hex;
+ for (int i = 0; i < mac_len_; i++) {
+ tmp.width(2);
+ tmp << static_cast<int>(mac_[i]);
+ if (i < mac_len_-1) {
+ tmp << ":";
+ }
+ }
+ return (tmp.str());
+}
+
+void IfaceMgr::Iface::setMac(const uint8_t* mac, size_t len) {
+ if (len > IfaceMgr::MAX_MAC_LEN) {
+ isc_throw(OutOfRange, "Interface " << getFullName()
+ << " was detected to have link address of length "
+ << len << ", but maximum supported length is "
+ << IfaceMgr::MAX_MAC_LEN);
+ }
+ mac_len_ = len;
+ memcpy(mac_, mac, len);
+}
+
+bool IfaceMgr::Iface::delAddress(const isc::asiolink::IOAddress& addr) {
+ for (AddressCollection::iterator a = addrs_.begin();
+ a!=addrs_.end(); ++a) {
+ if (*a==addr) {
+ addrs_.erase(a);
+ return (true);
+ }
+ }
+ return (false);
+}
+
+bool IfaceMgr::Iface::delSocket(uint16_t sockfd) {
+ list<SocketInfo>::iterator sock = sockets_.begin();
+ while (sock!=sockets_.end()) {
+ if (sock->sockfd_ == sockfd) {
+ close(sockfd);
+ sockets_.erase(sock);
+ return (true); //socket found
+ }
+ ++sock;
+ }
+ return (false); // socket not found
+}
+
+IfaceMgr::IfaceMgr()
+ :control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
+ control_buf_(new char[control_buf_len_]),
+ session_socket_(INVALID_SOCKET), session_callback_(NULL)
+{
+
+ cout << "IfaceMgr initialization." << endl;
+
+ try {
+ // required for sending/receiving packets
+ // let's keep it in front, just in case someone
+ // wants to send anything during initialization
+
+ // control_buf_ = boost::scoped_array<char>();
+
+ detectIfaces();
+
+ } catch (const std::exception& ex) {
+ cout << "IfaceMgr creation failed:" << ex.what() << endl;
+
+ // TODO Uncomment this (or call LOG_FATAL) once
+ // interface detection is implemented. Otherwise
+ // it is not possible to run tests in a portable
+ // way (see detectIfaces() method).
+ throw;
+ }
+}
+
+void IfaceMgr::closeSockets() {
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+
+ for (SocketCollection::iterator sock = iface->sockets_.begin();
+ sock != iface->sockets_.end(); ++sock) {
+ cout << "Closing socket " << sock->sockfd_ << endl;
+ close(sock->sockfd_);
+ }
+ iface->sockets_.clear();
+ }
+
+}
+
+IfaceMgr::~IfaceMgr() {
+ // control_buf_ is deleted automatically (scoped_ptr)
+ control_buf_len_ = 0;
+
+ closeSockets();
+}
+
+void IfaceMgr::stubDetectIfaces() {
+ string ifaceName;
+ const string v4addr("127.0.0.1"), v6addr("::1");
+
+ // This is a stub implementation for interface detection. Actual detection
+ // is faked by detecting loopback interface (lo or lo0). It will eventually
+ // be removed once we have actual implementations for all supported systems.
+
+ cout << "Interface detection is not implemented on this Operating System yet. "
+ << endl;
+
+ try {
+ if (if_nametoindex("lo") > 0) {
+ ifaceName = "lo";
+ // this is Linux-like OS
+ } else if (if_nametoindex("lo0") > 0) {
+ ifaceName = "lo0";
+ // this is BSD-like OS
+ } else {
+ // we give up. What OS is this, anyway? Solaris? Hurd?
+ isc_throw(NotImplemented,
+ "Interface detection on this OS is not supported.");
+ }
+
+ Iface iface(ifaceName, if_nametoindex(ifaceName.c_str()));
+ iface.flag_up_ = true;
+ iface.flag_running_ = true;
+
+ // note that we claim that this is not a loopback. iface_mgr tries to open a
+ // socket on all interfaces that are up, running and not loopback. As this is
+ // the only interface we were able to detect, let's pretend this is a normal
+ // interface.
+ iface.flag_loopback_ = false;
+ iface.flag_multicast_ = true;
+ iface.flag_broadcast_ = true;
+ iface.setHWType(HWTYPE_ETHERNET);
+
+ iface.addAddress(IOAddress(v4addr));
+ iface.addAddress(IOAddress(v6addr));
+ addInterface(iface);
+
+ cout << "Detected interface " << ifaceName << "/" << v4addr << "/"
+ << v6addr << endl;
+ } catch (const std::exception& ex) {
+ // TODO: deallocate whatever memory we used
+ // not that important, since this function is going to be
+ // thrown away as soon as we get proper interface detection
+ // implemented
+
+ // TODO Do LOG_FATAL here
+ std::cerr << "Interface detection failed." << std::endl;
+ throw;
+ }
+}
+
+bool IfaceMgr::openSockets4(const uint16_t port) {
+ int sock;
+ int count = 0;
+
+ for (IfaceCollection::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+
+ cout << "Trying opening socket on interface " << iface->getFullName() << endl;
+
+ if (iface->flag_loopback_ ||
+ !iface->flag_up_ ||
+ !iface->flag_running_) {
+ cout << "Interface " << iface->getFullName()
+ << " not suitable: is loopback, is down or not running" << endl;
+ continue;
+ }
+
+ AddressCollection addrs = iface->getAddresses();
+
+ for (AddressCollection::iterator addr= addrs.begin();
+ addr != addrs.end();
+ ++addr) {
+
+ // skip IPv6 addresses
+ if (addr->getFamily() != AF_INET) {
+ continue;
+ }
+
+ sock = openSocket(iface->getName(), *addr, port);
+ if (sock<0) {
+ cout << "Failed to open unicast socket." << endl;
+ return (false);
+ }
+
+ count++;
+ }
+ }
+ return (count > 0);
+
+}
+
+bool IfaceMgr::openSockets6(const uint16_t port) {
+ int sock;
+ int count = 0;
+
+ for (IfaceCollection::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+
+ if (iface->flag_loopback_ ||
+ !iface->flag_up_ ||
+ !iface->flag_running_) {
+ continue;
+ }
+
+ AddressCollection addrs = iface->getAddresses();
+
+ for (AddressCollection::iterator addr= addrs.begin();
+ addr != addrs.end();
+ ++addr) {
+
+ // skip IPv4 addresses
+ if (addr->getFamily() != AF_INET6) {
+ continue;
+ }
+
+ sock = openSocket(iface->getName(), *addr, port);
+ if (sock<0) {
+ cout << "Failed to open unicast socket." << endl;
+ return (false);
+ }
+
+ // Binding socket to unicast address and then joining multicast group
+ // works well on Mac OS (and possibly other BSDs), but does not work
+ // on Linux.
+ if ( !joinMulticast(sock, iface->getName(),
+ string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+ close(sock);
+ isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+ << " multicast group.");
+ }
+
+ count++;
+
+ /// @todo: Remove this ifdef once we start supporting BSD systems.
+#if defined(OS_LINUX)
+ // To receive multicast traffic, Linux requires binding socket to
+ // a multicast group. That in turn doesn't work on NetBSD.
+
+ int sock2 = openSocket(iface->getName(),
+ IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+ port);
+ if (sock2<0) {
+ iface->delSocket(sock); // delete previously opened socket first
+ isc_throw(Unexpected, "Failed to open multicast socket on"
+ << " interface " << iface->getFullName());
+ }
+#endif
+ }
+ }
+ return (count > 0);
+}
+
+void
+IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
+ for (IfaceCollection::const_iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+
+ const AddressCollection& addrs = iface->getAddresses();
+
+ out << "Detected interface " << iface->getFullName()
+ << ", hwtype=" << iface->getHWType()
+ << ", mac=" << iface->getPlainMac();
+ out << ", flags=" << hex << iface->flags_ << dec << "("
+ << (iface->flag_loopback_?"LOOPBACK ":"")
+ << (iface->flag_up_?"UP ":"")
+ << (iface->flag_running_?"RUNNING ":"")
+ << (iface->flag_multicast_?"MULTICAST ":"")
+ << (iface->flag_broadcast_?"BROADCAST ":"")
+ << ")" << endl;
+ out << " " << addrs.size() << " addr(s):";
+
+ for (AddressCollection::const_iterator addr = addrs.begin();
+ addr != addrs.end(); ++addr) {
+ out << " " << addr->toText();
+ }
+ out << endl;
+ }
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(int ifindex) {
+ for (IfaceCollection::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ if (iface->getIndex() == ifindex)
+ return (&(*iface));
+ }
+
+ return (NULL); // not found
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(const std::string& ifname) {
+ for (IfaceCollection::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ if (iface->getName() == ifname)
+ return (&(*iface));
+ }
+
+ return (NULL); // not found
+}
+
+int IfaceMgr::openSocket(const std::string& ifname, const IOAddress& addr,
+ const uint16_t port) {
+ Iface* iface = getIface(ifname);
+ if (!iface) {
+ isc_throw(BadValue, "There is no " << ifname << " interface present.");
+ }
+ switch (addr.getFamily()) {
+ case AF_INET:
+ return openSocket4(*iface, addr, port);
+ case AF_INET6:
+ return openSocket6(*iface, addr, port);
+ default:
+ isc_throw(BadValue, "Failed to detect family of address: "
+ << addr.toText());
+ }
+}
+
+int IfaceMgr::openSocket4(Iface& iface, const IOAddress& addr, uint16_t port) {
+
+ cout << "Creating UDP4 socket on " << iface.getFullName()
+ << " " << addr.toText() << "/port=" << port << endl;
+
+ struct sockaddr_in addr4;
+ memset(&addr4, 0, sizeof(sockaddr));
+ addr4.sin_family = AF_INET;
+ addr4.sin_port = htons(port);
+
+ addr4.sin_addr.s_addr = htonl(addr);
+ //addr4.sin_addr.s_addr = 0; // anyaddr: this will receive 0.0.0.0 => 255.255.255.255 traffic
+ // addr4.sin_addr.s_addr = 0xffffffffu; // broadcast address. This will receive 0.0.0.0 => 255.255.255.255 as well
+
+ int sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ isc_throw(Unexpected, "Failed to create UDP6 socket.");
+ }
+
+ if (bind(sock, (struct sockaddr *)&addr4, sizeof(addr4)) < 0) {
+ close(sock);
+ isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port);
+ }
+
+ // if there is no support for IP_PKTINFO, we are really out of luck
+ // it will be difficult to understand where this packet came from
+#if defined(IP_PKTINFO)
+ int flag = 1;
+ if (setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &flag, sizeof(flag)) != 0) {
+ close(sock);
+ isc_throw(Unexpected, "setsockopt: IP_PKTINFO: failed.");
+ }
+#endif
+
+ cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
+ addr.toText() << "/port=" << port << endl;
+
+ SocketInfo info(sock, addr, port);
+ iface.addSocket(info);
+
+ return (sock);
+}
+
+int IfaceMgr::openSocket6(Iface& iface, const IOAddress& addr, uint16_t port) {
+
+ cout << "Creating UDP6 socket on " << iface.getFullName()
+ << " " << addr.toText() << "/port=" << port << endl;
+
+ struct sockaddr_in6 addr6;
+ memset(&addr6, 0, sizeof(addr6));
+ addr6.sin6_family = AF_INET6;
+ addr6.sin6_port = htons(port);
+ if (addr.toText() != "::1")
+ addr6.sin6_scope_id = if_nametoindex(iface.getName().c_str());
+
+ memcpy(&addr6.sin6_addr,
+ addr.getAddress().to_v6().to_bytes().data(),
+ sizeof(addr6.sin6_addr));
+#ifdef HAVE_SA_LEN
+ addr6.sin6_len = sizeof(addr6);
+#endif
+
+ // TODO: use sockcreator once it becomes available
+
+ // make a socket
+ int sock = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ isc_throw(Unexpected, "Failed to create UDP6 socket.");
+ }
+
+ // Set the REUSEADDR option so that we don't fail to start if
+ // we're being restarted.
+ int flag = 1;
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&flag, sizeof(flag)) < 0) {
+ close(sock);
+ isc_throw(Unexpected, "Can't set SO_REUSEADDR option on dhcpv6 socket.");
+ }
+
+ if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
+ close(sock);
+ isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port);
+ }
+#ifdef IPV6_RECVPKTINFO
+ // RFC3542 - a new way
+ if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
+ &flag, sizeof(flag)) != 0) {
+ close(sock);
+ isc_throw(Unexpected, "setsockopt: IPV6_RECVPKTINFO failed.");
+ }
+#else
+ // RFC2292 - an old way
+ if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
+ &flag, sizeof(flag)) != 0) {
+ close(sock);
+ isc_throw(Unexpected, "setsockopt: IPV6_PKTINFO: failed.");
+ }
+#endif
+
+ // multicast stuff
+ if (addr.getAddress().to_v6().is_multicast()) {
+ // both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
+ // are link- and site-scoped, so there is no point in joining those groups
+ // with global addresses.
+
+ if ( !joinMulticast( sock, iface.getName(),
+ string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+ close(sock);
+ isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+ << " multicast group.");
+ }
+ }
+
+ cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
+ addr.toText() << "/port=" << port << endl;
+
+ SocketInfo info(sock, addr, port);
+ iface.addSocket(info);
+
+ return (sock);
+}
+
+bool
+IfaceMgr::joinMulticast(int sock, const std::string& ifname,
+const std::string & mcast) {
+
+ struct ipv6_mreq mreq;
+
+ if (inet_pton(AF_INET6, mcast.c_str(),
+ &mreq.ipv6mr_multiaddr) <= 0) {
+ cout << "Failed to convert " << ifname
+ << " to IPv6 multicast address." << endl;
+ return (false);
+ }
+
+ mreq.ipv6mr_interface = if_nametoindex(ifname.c_str());
+ if (setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
+ &mreq, sizeof(mreq)) < 0) {
+ cout << "Failed to join " << mcast << " multicast group." << endl;
+ return (false);
+ }
+
+ cout << "Joined multicast " << mcast << " group." << endl;
+
+ return (true);
+}
+
+bool
+IfaceMgr::send(const Pkt6Ptr& pkt) {
+ int result;
+
+ Iface* iface = getIface(pkt->getIface());
+ if (!iface) {
+ isc_throw(BadValue, "Unable to send Pkt6. Invalid interface ("
+ << pkt->getIface() << ") specified.");
+ }
+
+ memset(&control_buf_[0], 0, control_buf_len_);
+
+
+ // Set the target address we're sending to.
+ sockaddr_in6 to;
+ memset(&to, 0, sizeof(to));
+ to.sin6_family = AF_INET6;
+ to.sin6_port = htons(pkt->getRemotePort());
+ memcpy(&to.sin6_addr,
+ pkt->getRemoteAddr().getAddress().to_v6().to_bytes().data(),
+ 16);
+ to.sin6_scope_id = pkt->getIndex();
+
+ // Initialize our message header structure.
+ struct msghdr m;
+ memset(&m, 0, sizeof(m));
+ m.msg_name = &to;
+ m.msg_namelen = sizeof(to);
+
+ // Set the data buffer we're sending. (Using this wacky
+ // "scatter-gather" stuff... we only have a single chunk
+ // of data to send, so we declare a single vector entry.)
+
+ // As the v structure is a C-style struct used for both sending and
+ // receiving data, it is shared between sending and receiving
+ // (sendmsg and recvmsg). It is also defined in system headers,
+ // so we have no control over its definition. To set iov_base
+ // (defined as void*) we must use a const cast from const void*.
+ // Otherwise C++ compiler would complain that we are trying
+ // to assign const void* to void*.
+ struct iovec v;
+ memset(&v, 0, sizeof(v));
+ v.iov_base = const_cast<void *>(pkt->getBuffer().getData());
+ v.iov_len = pkt->getBuffer().getLength();
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ // Setting the interface is a bit more involved.
+ //
+ // We have to create a "control message", and set that to
+ // define the IPv6 packet information. We could set the
+ // source address if we wanted, but we can safely let the
+ // kernel decide what that should be.
+ m.msg_control = &control_buf_[0];
+ m.msg_controllen = control_buf_len_;
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&m);
+ cmsg->cmsg_level = IPPROTO_IPV6;
+ cmsg->cmsg_type = IPV6_PKTINFO;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
+ struct in6_pktinfo *pktinfo = convertPktInfo6(CMSG_DATA(cmsg));
+ memset(pktinfo, 0, sizeof(struct in6_pktinfo));
+ pktinfo->ipi6_ifindex = pkt->getIndex();
+ m.msg_controllen = cmsg->cmsg_len;
+
+ pkt->updateTimestamp();
+
+ result = sendmsg(getSocket(*pkt), &m, 0);
+ if (result < 0) {
+ isc_throw(Unexpected, "Pkt6 send failed: sendmsg() returned " << result);
+ }
+ cout << "Sent " << pkt->getBuffer().getLength() << " bytes over socket " << getSocket(*pkt)
+ << " on " << iface->getFullName() << " interface: "
+ << " dst=[" << pkt->getRemoteAddr().toText() << "]:" << pkt->getRemotePort()
+ << ", src=" << pkt->getLocalAddr().toText() << "]:" << pkt->getLocalPort()
+ << endl;
+
+ return (result);
+}
+
+bool
+IfaceMgr::send(const Pkt4Ptr& pkt)
+{
+
+ Iface* iface = getIface(pkt->getIface());
+ if (!iface) {
+ isc_throw(BadValue, "Unable to send Pkt4. Invalid interface ("
+ << pkt->getIface() << ") specified.");
+ }
+
+ memset(&control_buf_[0], 0, control_buf_len_);
+
+
+ // Set the target address we're sending to.
+ sockaddr_in to;
+ memset(&to, 0, sizeof(to));
+ to.sin_family = AF_INET;
+ to.sin_port = htons(pkt->getRemotePort());
+ to.sin_addr.s_addr = htonl(pkt->getRemoteAddr());
+
+ struct msghdr m;
+ // Initialize our message header structure.
+ memset(&m, 0, sizeof(m));
+ m.msg_name = &to;
+ m.msg_namelen = sizeof(to);
+
+ // Set the data buffer we're sending. (Using this wacky
+ // "scatter-gather" stuff... we only have a single chunk
+ // of data to send, so we declare a single vector entry.)
+ struct iovec v;
+ memset(&v, 0, sizeof(v));
+ // iov_base field is of void * type. We use it for packet
+ // transmission, so this buffer will not be modified.
+ v.iov_base = const_cast<void *>(pkt->getBuffer().getData());
+ v.iov_len = pkt->getBuffer().getLength();
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ // call OS-specific routines (like setting interface index)
+ os_send4(m, control_buf_, control_buf_len_, pkt);
+
+ cout << "Trying to send " << pkt->getBuffer().getLength() << " bytes to "
+ << pkt->getRemoteAddr().toText() << ":" << pkt->getRemotePort()
+ << " over socket " << getSocket(*pkt) << " on interface "
+ << getIface(pkt->getIface())->getFullName() << endl;
+
+ pkt->updateTimestamp();
+
+ int result = sendmsg(getSocket(*pkt), &m, 0);
+ if (result < 0) {
+ isc_throw(Unexpected, "Pkt4 send failed.");
+ }
+
+ cout << "Sent " << pkt->getBuffer().getLength() << " bytes over socket " << getSocket(*pkt)
+ << " on " << iface->getFullName() << " interface: "
+ << " dst=" << pkt->getRemoteAddr().toText() << ":" << pkt->getRemotePort()
+ << ", src=" << pkt->getLocalAddr().toText() << ":" << pkt->getLocalPort()
+ << endl;
+
+ return (result);
+}
+
+
+boost::shared_ptr<Pkt4>
+IfaceMgr::receive4(uint32_t timeout) {
+
+ const SocketInfo* candidate = 0;
+ IfaceCollection::const_iterator iface;
+
+ fd_set sockets;
+ FD_ZERO(&sockets);
+ int maxfd = 0;
+
+ stringstream names;
+
+ /// @todo: marginal performance optimization. We could create the set once
+ /// and then use its copy for select(). Please note that select() modifies
+ /// the provided set to indicate which sockets have something to read.
+ for (iface = ifaces_.begin(); iface != ifaces_.end(); ++iface) {
+
+ for (SocketCollection::const_iterator s = iface->sockets_.begin();
+ s != iface->sockets_.end(); ++s) {
+
+ // Only deal with IPv4 addresses.
+ if (s->addr_.getFamily() == AF_INET) {
+ names << s->sockfd_ << "(" << iface->getName() << ") ";
+
+ // Add this socket to listening set
+ FD_SET(s->sockfd_, &sockets);
+ if (maxfd < s->sockfd_) {
+ maxfd = s->sockfd_;
+ }
+ }
+ }
+ }
+
+ // if there is session socket registered...
+ if (session_socket_ != INVALID_SOCKET) {
+ // add it to the set as well
+ FD_SET(session_socket_, &sockets);
+ if (maxfd < session_socket_)
+ maxfd = session_socket_;
+ names << session_socket_ << "(session)";
+ }
+
+ /// @todo: implement sub-second precision one day
+ struct timeval select_timeout;
+ select_timeout.tv_sec = timeout;
+ select_timeout.tv_usec = 0;
+
+ cout << "Trying to receive data on sockets: " << names.str()
+ << ". Timeout is " << timeout << " seconds." << endl;
+ int result = select(maxfd + 1, &sockets, NULL, NULL, &select_timeout);
+ cout << "select returned " << result << endl;
+
+ if (result == 0) {
+ // nothing received and timeout has been reached
+ return (Pkt4Ptr()); // NULL
+ } else if (result < 0) {
+ cout << "Socket read error: " << strerror(errno) << endl;
+
+ /// @todo: perhaps throw here?
+ return (Pkt4Ptr()); // NULL
+ }
+
+ // Let's find out which socket has the data
+ if ((session_socket_ != INVALID_SOCKET) && (FD_ISSET(session_socket_, &sockets))) {
+ // something received over session socket
+ cout << "BIND10 command or config available over session socket." << endl;
+
+ if (session_callback_) {
+ // in theory we could call io_service.run_one() here, instead of
+ // implementing a callback mechanism, but that would introduce
+ // an asiolink dependency into libdhcp++ and that is something we want
+ // to avoid (see the CPE market and our long-term plans for minimalistic
+ // implementations).
+ session_callback_();
+ }
+
+ return (Pkt4Ptr()); // NULL
+ }
+
+ // Let's find out which interface/socket has the data
+ for (iface = ifaces_.begin(); iface != ifaces_.end(); ++iface) {
+ for (SocketCollection::const_iterator s = iface->sockets_.begin();
+ s != iface->sockets_.end(); ++s) {
+ if (FD_ISSET(s->sockfd_, &sockets)) {
+ candidate = &(*s);
+ break;
+ }
+ }
+ if (candidate) {
+ break;
+ }
+ }
+
+ if (!candidate) {
+ cout << "Received data over unknown socket." << endl;
+ return (Pkt4Ptr()); // NULL
+ }
+
+ cout << "Trying to receive over UDP4 socket " << candidate->sockfd_ << " bound to "
+ << candidate->addr_.toText() << "/port=" << candidate->port_ << " on "
+ << iface->getFullName() << endl;
+
+ // Now we have a socket, let's get some data from it!
+ struct sockaddr_in from_addr;
+ uint8_t buf[RCVBUFSIZE];
+
+ memset(&control_buf_[0], 0, control_buf_len_);
+ memset(&from_addr, 0, sizeof(from_addr));
+
+ // Initialize our message header structure.
+ struct msghdr m;
+ memset(&m, 0, sizeof(m));
+
+ // Point so we can get the from address.
+ m.msg_name = &from_addr;
+ m.msg_namelen = sizeof(from_addr);
+
+ struct iovec v;
+ v.iov_base = static_cast<void*>(buf);
+ v.iov_len = RCVBUFSIZE;
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ // Getting the interface is a bit more involved.
+ //
+ // We set up some space for a "control message". We have
+ // previously asked the kernel to give us packet
+ // information (when we initialized the interface), so we
+ // should get the destination address from that.
+ m.msg_control = &control_buf_[0];
+ m.msg_controllen = control_buf_len_;
+
+ result = recvmsg(candidate->sockfd_, &m, 0);
+ if (result < 0) {
+ cout << "Failed to receive UDP4 data." << endl;
+ return (Pkt4Ptr()); // NULL
+ }
+
+ // We have all data let's create Pkt4 object.
+ Pkt4Ptr pkt = Pkt4Ptr(new Pkt4(buf, result));
+
+ pkt->updateTimestamp();
+
+ unsigned int ifindex = iface->getIndex();
+
+ IOAddress from(htonl(from_addr.sin_addr.s_addr));
+ uint16_t from_port = htons(from_addr.sin_port);
+
+ // Set the receiving interface based on which socket was used to
+ // receive data. OS-specific info (see os_receive4()) may be more reliable,
+ // so this value may be overwritten.
+ pkt->setIndex(ifindex);
+ pkt->setIface(iface->getName());
+ pkt->setRemoteAddr(from);
+ pkt->setRemotePort(from_port);
+ pkt->setLocalPort(candidate->port_);
+
+ if (!os_receive4(m, pkt)) {
+ cout << "Unable to find pktinfo" << endl;
+ return (boost::shared_ptr<Pkt4>()); // NULL
+ }
+
+ cout << "Received " << result << " bytes from " << from.toText()
+ << "/port=" << from_port
+ << " sent to " << pkt->getLocalAddr().toText() << " over interface "
+ << iface->getFullName() << endl;
+
+ return (pkt);
+}
+
+Pkt6Ptr IfaceMgr::receive6() {
+ uint8_t buf[RCVBUFSIZE];
+
+ memset(&control_buf_[0], 0, control_buf_len_);
+ struct sockaddr_in6 from;
+ memset(&from, 0, sizeof(from));
+
+ // Initialize our message header structure.
+ struct msghdr m;
+ memset(&m, 0, sizeof(m));
+
+ // Point so we can get the from address.
+ m.msg_name = &from;
+ m.msg_namelen = sizeof(from);
+
+ // Set the data buffer we're receiving. (Using this wacky
+ // "scatter-gather" stuff... but we that doesn't really make
+ // sense for us, so we use a single vector entry.)
+ struct iovec v;
+ memset(&v, 0, sizeof(v));
+ v.iov_base = static_cast<void*>(buf);
+ v.iov_len = RCVBUFSIZE;
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ // Getting the interface is a bit more involved.
+ //
+ // We set up some space for a "control message". We have
+ // previously asked the kernel to give us packet
+ // information (when we initialized the interface), so we
+ // should get the destination address from that.
+ m.msg_control = &control_buf_[0];
+ m.msg_controllen = control_buf_len_;
+
+ /// TODO: Need to move to select() and poll over
+ /// all available sockets. For now, we just take the
+ /// first interface and use the first socket from it.
+ IfaceCollection::const_iterator iface = ifaces_.begin();
+ const SocketInfo* candidate = 0;
+ while (iface != ifaces_.end()) {
+ for (SocketCollection::const_iterator s = iface->sockets_.begin();
+ s != iface->sockets_.end(); ++s) {
+ if (s->addr_.getFamily() != AF_INET6) {
+ continue;
+ }
+ if (s->addr_.getAddress().to_v6().is_multicast()) {
+ candidate = &(*s);
+ break;
+ }
+ if (!candidate) {
+ candidate = &(*s); // it's not multicast, but it's better than nothing
+ }
+ }
+ if (candidate) {
+ break;
+ }
+ ++iface;
+ }
+ if (iface == ifaces_.end()) {
+ isc_throw(Unexpected, "No suitable IPv6 interfaces detected. Can't receive anything.");
+ }
+
+ if (!candidate) {
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any sockets open.");
+ }
+
+ cout << "Trying to receive over UDP6 socket " << candidate->sockfd_ << " bound to "
+ << candidate->addr_.toText() << "/port=" << candidate->port_ << " on "
+ << iface->getFullName() << endl;
+ int result = recvmsg(candidate->sockfd_, &m, 0);
+
+ struct in6_addr to_addr;
+ memset(&to_addr, 0, sizeof(to_addr));
+
+ int ifindex = -1;
+ if (result >= 0) {
+ struct in6_pktinfo* pktinfo = NULL;
+
+
+ // If we did read successfully, then we need to loop
+ // through the control messages we received and
+ // find the one with our destination address.
+ //
+ // We also keep a flag to see if we found it. If we
+ // didn't, then we consider this to be an error.
+ bool found_pktinfo = false;
+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&m);
+ while (cmsg != NULL) {
+ if ((cmsg->cmsg_level == IPPROTO_IPV6) &&
+ (cmsg->cmsg_type == IPV6_PKTINFO)) {
+ pktinfo = convertPktInfo6(CMSG_DATA(cmsg));
+ to_addr = pktinfo->ipi6_addr;
+ ifindex = pktinfo->ipi6_ifindex;
+ found_pktinfo = true;
+ break;
+ }
+ cmsg = CMSG_NXTHDR(&m, cmsg);
+ }
+ if (!found_pktinfo) {
+ cout << "Unable to find pktinfo" << endl;
+ return (Pkt6Ptr()); // NULL
+ }
+ } else {
+ cout << "Failed to receive data." << endl;
+ return (Pkt6Ptr()); // NULL
+ }
+
+ // Let's create a packet.
+ Pkt6Ptr pkt;
+ try {
+ pkt = Pkt6Ptr(new Pkt6(buf, result));
+ } catch (const std::exception& ex) {
+ cout << "Failed to create new packet." << endl;
+ return (Pkt6Ptr()); // NULL
+ }
+
+ pkt->updateTimestamp();
+
+ pkt->setLocalAddr(IOAddress::from_bytes(AF_INET6,
+ reinterpret_cast<const uint8_t*>(&to_addr)));
+ pkt->setRemoteAddr(IOAddress::from_bytes(AF_INET6,
+ reinterpret_cast<const uint8_t*>(&from.sin6_addr)));
+ pkt->setRemotePort(ntohs(from.sin6_port));
+ pkt->setIndex(ifindex);
+
+ Iface* received = getIface(pkt->getIndex());
+ if (received) {
+ pkt->setIface(received->getName());
+ } else {
+ cout << "Received packet over unknown interface (ifindex="
+ << pkt->getIndex() << ")." << endl;
+ return (Pkt6Ptr()); // NULL
+ }
+
+ /// @todo: Move this to LOG_DEBUG
+ cout << "Received " << pkt->getBuffer().getLength() << " bytes over "
+ << pkt->getIface() << "/" << pkt->getIndex() << " interface: "
+ << " src=" << pkt->getRemoteAddr().toText()
+ << ", dst=" << pkt->getLocalAddr().toText()
+ << endl;
+
+ return (pkt);
+}
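
As an illustration (not from this patch), the IPV6_PKTINFO pattern that receive6() relies on looks like this with plain POSIX calls. The function name and variables below are made up; Linux is assumed, and with glibc the in6_pktinfo structure requires _GNU_SOURCE, which g++ defines by default:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <cstring>

    // Reads one datagram from 'sock' (a bound UDP/IPv6 socket) and extracts
    // the destination address and receiving interface index from the
    // IPV6_PKTINFO control message, as receive6() does above.
    ssize_t recvWithDestInfo(int sock, char* buf, size_t buflen,
                             in6_addr* dst, int* ifindex) {
        int on = 1;
        // Ask the kernel to attach packet info to every received datagram
        // (normally done once, right after the socket is opened).
        setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));

        char control[CMSG_SPACE(sizeof(in6_pktinfo))];
        sockaddr_in6 from;
        iovec v;
        v.iov_base = buf;
        v.iov_len = buflen;

        msghdr m;
        std::memset(&m, 0, sizeof(m));
        m.msg_name = &from;
        m.msg_namelen = sizeof(from);
        m.msg_iov = &v;
        m.msg_iovlen = 1;
        m.msg_control = control;
        m.msg_controllen = sizeof(control);

        const ssize_t len = recvmsg(sock, &m, 0);
        if (len < 0) {
            return (len);
        }
        // Walk the ancillary data and pick out IPV6_PKTINFO.
        for (cmsghdr* c = CMSG_FIRSTHDR(&m); c != NULL; c = CMSG_NXTHDR(&m, c)) {
            if (c->cmsg_level == IPPROTO_IPV6 && c->cmsg_type == IPV6_PKTINFO) {
                in6_pktinfo info;
                std::memcpy(&info, CMSG_DATA(c), sizeof(info));
                *dst = info.ipi6_addr;
                *ifindex = info.ipi6_ifindex;
                break;
            }
        }
        return (len);
    }
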
+
+uint16_t IfaceMgr::getSocket(const isc::dhcp::Pkt6& pkt) {
+ Iface* iface = getIface(pkt.getIface());
+ if (iface == NULL) {
+ isc_throw(BadValue, "Tried to find socket for non-existent interface "
+ << pkt.getIface());
+ }
+
+ SocketCollection::const_iterator s;
+ for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+ if ((s->family_ == AF_INET6) &&
+ (!s->addr_.getAddress().to_v6().is_multicast())) {
+ return (s->sockfd_);
+ }
+ /// @todo: Add more checks here later. If remote address is
+ /// not link-local, we can't use link local bound socket
+ /// to send data.
+ }
+
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any suitable IPv6 sockets open.");
+}
+
+uint16_t IfaceMgr::getSocket(isc::dhcp::Pkt4 const& pkt) {
+ Iface* iface = getIface(pkt.getIface());
+ if (iface == NULL) {
+ isc_throw(BadValue, "Tried to find socket for non-existent interface "
+ << pkt.getIface());
+ }
+
+ SocketCollection::const_iterator s;
+ for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+ if (s->family_ == AF_INET) {
+ return (s->sockfd_);
+ }
+ /// TODO: Add more checks here later. If remote address is
+ /// not link-local, we can't use link local bound socket
+ /// to send data.
+ }
+
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any suitable IPv4 sockets open.");
+}
+
+} // end of namespace isc::dhcp
+} // end of namespace isc
diff --git a/src/lib/dhcp/iface_mgr.h b/src/lib/dhcp/iface_mgr.h
new file mode 100644
index 0000000..7fa2e85
--- /dev/null
+++ b/src/lib/dhcp/iface_mgr.h
@@ -0,0 +1,556 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef IFACE_MGR_H
+#define IFACE_MGR_H
+
+#include <list>
+#include <boost/shared_ptr.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/noncopyable.hpp>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/pkt6.h>
+
+namespace isc {
+
+namespace dhcp {
+/// @brief handles network interfaces, transmission and reception
+///
+/// IfaceMgr is an interface manager class that detects available network
+/// interfaces, configured addresses, link-local addresses, and provides
+/// an API for using sockets.
+///
+class IfaceMgr : public boost::noncopyable {
+public:
+ /// type that defines list of addresses
+ typedef std::vector<isc::asiolink::IOAddress> AddressCollection;
+
+ /// defines callback used when commands are received over control session
+ typedef void (*SessionCallback) (void);
+
+ /// maximum MAC address length (Infiniband uses 20 bytes)
+ static const unsigned int MAX_MAC_LEN = 20;
+
+ /// @brief Packet reception buffer size
+ ///
+ /// RFC3315 states that server responses may be
+ /// fragmented if they are over MTU. There is no
+ /// text about whether clients' packets may be larger
+ /// than 1500 bytes. For now, we assume that
+ /// we don't support packets larger than 1500 bytes.
+ static const uint32_t RCVBUFSIZE = 1500;
+
+ /// Holds information about socket.
+ struct SocketInfo {
+ uint16_t sockfd_; /// socket descriptor
+ isc::asiolink::IOAddress addr_; /// bound address
+ uint16_t port_; /// socket port
+ uint16_t family_; /// IPv4 or IPv6
+
+ /// @brief SocketInfo constructor.
+ ///
+ /// @param sockfd socket descriptor
+ /// @param addr an address the socket is bound to
+ /// @param port a port the socket is bound to
+ SocketInfo(uint16_t sockfd, const isc::asiolink::IOAddress& addr,
+ uint16_t port)
+ :sockfd_(sockfd), addr_(addr), port_(port), family_(addr.getFamily()) { }
+ };
+
+ /// type that holds a list of socket information
+ typedef std::list<SocketInfo> SocketCollection;
+
+ /// @brief represents a single network interface
+ ///
+ /// Iface structure represents a network interface with all useful
+ /// information, such as name, interface index, MAC address and a
+ /// list of assigned addresses.
+ class Iface {
+ public:
+ /// @brief Iface constructor.
+ ///
+ /// Creates Iface object that represents network interface.
+ ///
+ /// @param name name of the interface
+ /// @param ifindex interface index (unique integer identifier)
+ Iface(const std::string& name, int ifindex);
+
+ /// @brief Returns full interface name as "ifname/ifindex" string.
+ ///
+ /// @return string with interface name
+ std::string getFullName() const;
+
+ /// @brief Returns link-layer address as plain text.
+ ///
+ /// @return MAC address as a plain text (string)
+ std::string getPlainMac() const;
+
+ /// @brief Sets MAC address of the interface.
+ ///
+ /// @param mac pointer to MAC address buffer
+ /// @param macLen length of mac address
+ void setMac(const uint8_t* mac, size_t macLen);
+
+ /// @brief Returns MAC length.
+ ///
+ /// @return length of MAC address
+ size_t getMacLen() const { return mac_len_; }
+
+ /// @brief Returns pointer to MAC address.
+ ///
+ /// Note: the returned pointer is only valid as long as the interface
+ /// object that returned it exists.
+ const uint8_t* getMac() const { return mac_; }
+
+ /// @brief Sets flag_*_ fields based on bitmask value returned by OS
+ ///
+ /// Note: Implementation of this method is OS-dependent as bits have
+ /// different meaning on each OS.
+ ///
+ /// @param flags bitmask value returned by OS in interface detection
+ void setFlags(uint32_t flags);
+
+ /// @brief Returns interface index.
+ ///
+ /// @return interface index
+ uint16_t getIndex() const { return ifindex_; }
+
+ /// @brief Returns interface name.
+ ///
+ /// @return interface name
+ std::string getName() const { return name_; };
+
+ /// @brief Sets up hardware type of the interface.
+ ///
+ /// @param type hardware type
+ void setHWType(uint16_t type ) { hardware_type_ = type; }
+
+ /// @brief Returns hardware type of the interface.
+ ///
+ /// @return hardware type
+ uint16_t getHWType() const { return hardware_type_; }
+
+ /// @brief Returns all addresses configured on an interface.
+ ///
+ /// Care should be taken to not use this collection after the Iface
+ /// object ceases to exist. That is easy in most cases as Iface objects
+ /// are created by IfaceMgr, which is a singleton and is expected to be
+ /// available at all times. We may revisit this if we ever decide to
+ /// implement dynamic interface detection, but such a fancy feature
+ /// would mostly be useful for clients with wifi/vpn/virtual interfaces.
+ ///
+ /// @return collection of addresses
+ const AddressCollection& getAddresses() const { return addrs_; }
+
+ /// @brief Adds an address to an interface.
+ ///
+ /// This only adds an address to collection, it does not physically
+ /// configure address on actual network interface.
+ ///
+ /// @param addr address to be added
+ void addAddress(const isc::asiolink::IOAddress& addr) {
+ addrs_.push_back(addr);
+ }
+
+ /// @brief Deletes an address from an interface.
+ ///
+ /// This only deletes address from collection, it does not physically
+ /// remove address configuration from actual network interface.
+ ///
+ /// @param addr address to be removed.
+ ///
+ /// @return true if removal was successful (address was in collection),
+ /// false otherwise
+ bool delAddress(const isc::asiolink::IOAddress& addr);
+
+ /// @brief Adds socket descriptor to an interface.
+ ///
+ /// @param sock SocketInfo structure that describes socket.
+ void addSocket(const SocketInfo& sock)
+ { sockets_.push_back(sock); }
+
+ /// @brief Closes socket.
+ ///
+ /// Closes socket and removes corresponding SocketInfo structure
+ /// from an interface.
+ ///
+ /// @param sockfd socket descriptor to be closed/removed.
+ /// @return true if there was such socket, false otherwise
+ bool delSocket(uint16_t sockfd);
+
+ /// sockets open on this interface (used for sending and receiving data)
+ /// TODO: this should be protected
+ SocketCollection sockets_;
+
+ protected:
+ /// network interface name
+ std::string name_;
+
+ /// interface index (a value that uniquely identifies an interface)
+ int ifindex_;
+
+ /// list of assigned addresses
+ AddressCollection addrs_;
+
+ /// link-layer address
+ uint8_t mac_[MAX_MAC_LEN];
+
+ /// length of link-layer address (usually 6)
+ size_t mac_len_;
+
+ /// hardware type
+ uint16_t hardware_type_;
+
+ public:
+ /// @todo: Make those fields protected once we start supporting more
+ /// than just Linux
+
+ /// specifies if selected interface is loopback
+ bool flag_loopback_;
+
+ /// specifies if selected interface is up
+ bool flag_up_;
+
+ /// flag specifies if selected interface is running
+ /// (e.g. cable plugged in, wifi associated)
+ bool flag_running_;
+
+ /// flag specifies if selected interface is multicast capable
+ bool flag_multicast_;
+
+ /// flag specifies if selected interface is broadcast capable
+ bool flag_broadcast_;
+
+ /// interface flags (this value is as is returned by OS,
+ /// it may mean different things on different OSes)
+ uint32_t flags_;
+ };
+
+ // TODO performance improvement: we may change this into
+ // 2 maps (ifindex-indexed and name-indexed) and
+ // also hide it (it is public for now to make tests easier)
+
+ /// type that holds a list of interfaces
+ typedef std::list<Iface> IfaceCollection;
+
+ /// IfaceMgr is a singleton class. This method returns reference
+ /// to its sole instance.
+ ///
+ /// @return the only existing instance of interface manager
+ static IfaceMgr& instance();
+
+ /// @brief Returns interface with specified interface index
+ ///
+ /// @param ifindex index of searched interface
+ ///
+ /// @return interface with requested index (or NULL if no such
+ /// interface is present)
+ ///
+ Iface* getIface(int ifindex);
+
+ /// @brief Returns interface with specified interface name
+ ///
+ /// @param ifname name of searched interface
+ ///
+ /// @return interface with requested name (or NULL if no such
+ /// interface is present)
+ ///
+ Iface*
+ getIface(const std::string& ifname);
+
+ /// @brief Returns container with all interfaces.
+ ///
+ /// This reference is only valid as long as IfaceMgr is valid. However,
+ /// since IfaceMgr is a singleton and is expected to be destroyed after
+ /// main() function completes, you should not worry much about this.
+ ///
+ /// @return container with all interfaces.
+ const IfaceCollection& getIfaces() { return ifaces_; }
+
+ /// @brief Return most suitable socket for transmitting specified IPv6 packet.
+ ///
+ /// This method takes Pkt6 (see overloaded implementation that takes
+ /// Pkt4) and chooses appropriate socket to send it. This method
+ /// may throw BadValue if specified packet does not have outbound
+ /// interface specified, no such interface exists, or specified
+ /// interface does not have any appropriate sockets open.
+ ///
+ /// @param pkt a packet to be transmitted
+ ///
+ /// @return a socket descriptor
+ uint16_t getSocket(const isc::dhcp::Pkt6& pkt);
+
+ /// @brief Return most suitable socket for transmitting specified IPv4 packet.
+ ///
+ /// This method takes Pkt4 (see overloaded implementation that takes
+ /// Pkt6) and chooses appropriate socket to send it. This method
+ /// may throw BadValue if specified packet does not have outbound
+ /// interface specified, no such interface exists, or specified
+ /// interface does not have any appropriate sockets open.
+ ///
+ /// @param pkt a packet to be transmitted
+ ///
+ /// @return a socket descriptor
+ uint16_t getSocket(const isc::dhcp::Pkt4& pkt);
+
+ /// debugging method that prints out all available interfaces
+ ///
+ /// @param out specifies stream to print list of interfaces to
+ void
+ printIfaces(std::ostream& out = std::cout);
+
+ /// @brief Sends an IPv6 packet.
+ ///
+ /// Sends an IPv6 packet. All parameters for actual transmission are specified in
+ /// Pkt6 structure itself. That includes destination address, src/dst port
+ /// and interface over which data will be sent.
+ ///
+ /// @param pkt packet to be sent
+ ///
+ /// @return true if sending was successful
+ bool send(const Pkt6Ptr& pkt);
+
+ /// @brief Sends an IPv4 packet.
+ ///
+ /// Sends an IPv4 packet. All parameters for actual transmission are specified
+ /// in Pkt4 structure itself. That includes destination address, src/dst
+ /// port and interface over which data will be sent.
+ ///
+ /// @param pkt a packet to be sent
+ ///
+ /// @return true if sending was successful
+ bool send(const Pkt4Ptr& pkt);
+
+ /// @brief Tries to receive IPv6 packet over open IPv6 sockets.
+ ///
+ /// Attempts to receive a single IPv6 packet over any of the open IPv6 sockets.
+ /// If reception is successful and all information about its sender
+ /// is obtained, a Pkt6 object is created and returned.
+ ///
+ /// TODO Start using select() and add timeout to be able
+ /// to not wait infinitely, but rather do something useful
+ /// (e.g. remove expired leases)
+ ///
+ /// @return Pkt6 object representing received packet (or NULL)
+ Pkt6Ptr receive6();
+
+ /// @brief Tries to receive IPv4 packet over open IPv4 sockets.
+ ///
+ /// Attempts to receive a single IPv4 packet over any of the open IPv4 sockets.
+ /// If reception is successful and all information about its sender
+ /// is obtained, a Pkt4 object is created and returned.
+ ///
+ /// @param timeout specifies timeout (in seconds)
+ ///
+ /// @return Pkt4 object representing received packet (or NULL)
+ Pkt4Ptr receive4(uint32_t timeout);
+
+ /// Opens UDP/IP socket and binds it to address, interface and port.
+ ///
+ /// Specific type of socket (UDP/IPv4 or UDP/IPv6) depends on passed addr
+ /// family.
+ ///
+ /// @param ifname name of the interface
+ /// @param addr address to be bound.
+ /// @param port UDP port.
+ ///
+ /// Method will throw if socket creation, socket binding or multicast
+ /// join fails.
+ ///
+ /// @return socket descriptor, if socket creation, binding and multicast
+ /// group join were all successful.
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr, const uint16_t port);
+
+ /// Opens IPv6 sockets on detected interfaces.
+ ///
+ /// Will throw exception if socket creation fails.
+ ///
+ /// @param port specifies port number (usually DHCP6_SERVER_PORT)
+ ///
+ /// @return true if any sockets were opened successfully
+ bool openSockets6(const uint16_t port = DHCP6_SERVER_PORT);
+
+ /// @brief Closes all open sockets.
+ /// It is used in the destructor, but also by the Dhcpv4_srv and Dhcpv6_srv classes.
+ void closeSockets();
+
+ /// Opens IPv4 sockets on detected interfaces.
+ /// Will throw exception if socket creation fails.
+ ///
+ /// @param port specifies port number (usually DHCP4_SERVER_PORT)
+ ///
+ /// @return true if any sockets were opened successfully
+ bool openSockets4(const uint16_t port = DHCP4_SERVER_PORT);
+
+ /// @brief returns number of detected interfaces
+ ///
+ /// @return number of detected interfaces
+ uint16_t countIfaces() { return ifaces_.size(); }
+
+ /// @brief Sets session socket and a callback
+ ///
+ /// Specifies the session socket and a callback that will be called
+ /// when data is received over that socket.
+ ///
+ /// @param socketfd socket descriptor
+ /// @param callback callback function
+ void set_session_socket(int socketfd, SessionCallback callback) {
+ session_socket_ = socketfd;
+ session_callback_ = callback;
+ }
+
+ /// A value of socket descriptor representing "not specified" state.
+ static const int INVALID_SOCKET = -1;
+
+ // don't use private, we need derived classes in tests
+protected:
+
+ /// @brief Protected constructor.
+ ///
+ /// Protected constructor. This is a singleton class. We don't want
+ /// anyone to create instances of IfaceMgr. Use instance() method instead.
+ IfaceMgr();
+
+ virtual ~IfaceMgr();
+
+ /// @brief Opens IPv4 socket.
+ ///
+ /// Please do not use this method directly. Use openSocket instead.
+ ///
+ /// This method may throw exception if socket creation fails.
+ ///
+ /// @param iface reference to interface structure.
+ /// @param addr an address the created socket should be bound to
+ /// @param port a port that created socket should be bound to
+ ///
+ /// @return socket descriptor
+ int openSocket4(Iface& iface, const isc::asiolink::IOAddress& addr, uint16_t port);
+
+ /// @brief Opens IPv6 socket.
+ ///
+ /// Please do not use this method directly. Use openSocket instead.
+ ///
+ /// This method may throw exception if socket creation fails.
+ ///
+ /// @param iface reference to interface structure.
+ /// @param addr an address the created socket should be bound to
+ /// @param port a port that created socket should be bound to
+ ///
+ /// @return socket descriptor
+ int openSocket6(Iface& iface, const isc::asiolink::IOAddress& addr, uint16_t port);
+
+ /// @brief Adds an interface to list of known interfaces.
+ ///
+ /// @param iface reference to Iface object.
+ void addInterface(const Iface& iface) {
+ ifaces_.push_back(iface);
+ }
+
+ /// @brief Detects network interfaces.
+ ///
+ /// This method will eventually detect available interfaces. For now
+ /// it offers a stub implementation. The first interface name and link-local
+ /// IPv6 address are read from the interfaces.txt file.
+ void
+ detectIfaces();
+
+ /// @brief Stub implementation of network interface detection.
+ ///
+ /// This implementation reads a single line from the interfaces.txt file
+ /// and pretends to detect such an interface. The first interface name and
+ /// link-local IPv6 address or IPv4 address are read from the
+ /// interfaces.txt file.
+ void
+ stubDetectIfaces();
+
+ // TODO: having 2 maps (ifindex->iface and ifname->iface) would
+ // probably be better for performance reasons
+
+ /// List of available interfaces
+ IfaceCollection ifaces_;
+
+ /// a pointer to a sole instance of this class (a singleton)
+ static IfaceMgr * instance_;
+
+ // TODO: Also keep this interface on Iface once interface detection
+ // is implemented. We may need it e.g. to close all sockets on
+ // specific interface
+ //int recvsock_; // TODO: should be fd_set eventually, but we have only
+ //int sendsock_; // 2 sockets for now. Will do for until next release
+
+ // we can't use the same socket, as receiving socket
+ // is bound to multicast address. And we all know what happens
+ // to people who try to use multicast as source address.
+
+ /// length of the control_buf_ array
+ size_t control_buf_len_;
+
+ /// control-buffer, used in transmission and reception
+ boost::scoped_array<char> control_buf_;
+
+ /// @brief A wrapper for OS-specific operations before sending IPv4 packet
+ ///
+ /// @param m message header (will be later used for sendmsg() call)
+ /// @param control_buf buffer to be used during transmission
+ /// @param control_buf_len buffer length
+ /// @param pkt packet to be sent
+ void os_send4(struct msghdr& m, boost::scoped_array<char>& control_buf,
+ size_t control_buf_len, const Pkt4Ptr& pkt);
+
+ /// @brief OS-specific operations during IPv4 packet reception
+ ///
+ /// @param m message header (was used during recvmsg() call)
+ /// @param pkt packet received (some fields will be set here)
+ ///
+ /// @return true if successful, false otherwise
+ bool os_receive4(struct msghdr& m, Pkt4Ptr& pkt);
+
+ /// socket descriptor of the session socket
+ int session_socket_;
+
+ /// a callback that will be called when data arrives over session_socket_
+ SessionCallback session_callback_;
+private:
+
+ /// @brief Creates a single instance of this class (a singleton implementation)
+ static void
+ instanceCreate();
+
+ /// @brief Joins IPv6 multicast group on a socket.
+ ///
+ /// Socket must be created and bound to an address. Note that this
+ /// address is different than the multicast address. For example DHCPv6
+ /// server should bind its socket to link-local address (fe80::1234...)
+ /// and later join ff02::1:2 multicast group.
+ ///
+ /// @param sock socket fd (socket must be bound)
+ /// @param ifname interface name (for link-scoped multicast groups)
+ /// @param mcast multicast address to join (e.g. "ff02::1:2")
+ ///
+ /// @return true if multicast join was successful
+ ///
+ bool
+ joinMulticast(int sock, const std::string& ifname,
+ const std::string& mcast);
+
+};
+
+}; // namespace isc::dhcp
+}; // namespace isc
+
+#endif
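
As an illustration (not from this patch), a server component would drive the API declared above roughly as follows. handleOneQuery() and the response-building step are hypothetical; only the IfaceMgr calls, Pkt6Ptr and DHCP6_SERVER_PORT come from the headers above:

    #include <dhcp/iface_mgr.h>
    #include <stdint.h>
    #include <iostream>

    using namespace isc::dhcp;

    // One pass of a hypothetical DHCPv6 receive/reply cycle built on IfaceMgr.
    void handleOneQuery() {
        IfaceMgr& mgr = IfaceMgr::instance();       // singleton accessor

        // Open UDP/IPv6 sockets on all detected interfaces (throws on failure).
        mgr.openSockets6(DHCP6_SERVER_PORT);

        // Block until a packet arrives (see the @todo about select() above).
        Pkt6Ptr query = mgr.receive6();
        if (!query) {
            std::cout << "No packet received." << std::endl;
            mgr.closeSockets();
            return;
        }

        // A real server would construct a response here; reusing the query
        // object keeps the sketch short.
        Pkt6Ptr reply = query;

        // getSocket() selects a suitable unicast socket on the packet's
        // interface; send() transmits using the addressing carried in Pkt6.
        const uint16_t sock = mgr.getSocket(*reply);
        std::cout << "Sending via socket " << sock << std::endl;
        mgr.send(reply);

        mgr.closeSockets();
    }
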
diff --git a/src/lib/dhcp/iface_mgr_bsd.cc b/src/lib/dhcp/iface_mgr_bsd.cc
new file mode 100644
index 0000000..e3f11a1
--- /dev/null
+++ b/src/lib/dhcp/iface_mgr_bsd.cc
@@ -0,0 +1,55 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#if defined(OS_BSD)
+
+#include <dhcp/iface_mgr.h>
+#include <exceptions/exceptions.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+namespace isc {
+namespace dhcp {
+
+void
+IfaceMgr::detectIfaces() {
+ /// @todo do the actual detection on BSDs. Currently just calling
+ /// stub implementation.
+ stubDetectIfaces();
+}
+
+void IfaceMgr::os_send4(struct msghdr& /*m*/,
+ boost::scoped_array<char>& /*control_buf*/,
+ size_t /*control_buf_len*/,
+ const Pkt4Ptr& /*pkt*/) {
+ // @todo: Are there any specific actions required before sending IPv4 packet
+ // on BSDs? See iface_mgr_linux.cc for working Linux implementation.
+}
+
+bool IfaceMgr::os_receive4(struct msghdr& /*m*/, Pkt4Ptr& /*pkt*/) {
+ // @todo: Are there any specific actions required before receiving IPv4 packet
+ // on BSDs? See iface_mgr_linux.cc for working Linux implementation.
+
+ return (true); // pretend that we have everything set up for reception.
+}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
+
+#endif
diff --git a/src/lib/dhcp/iface_mgr_linux.cc b/src/lib/dhcp/iface_mgr_linux.cc
new file mode 100644
index 0000000..90431de
--- /dev/null
+++ b/src/lib/dhcp/iface_mgr_linux.cc
@@ -0,0 +1,576 @@
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// @file
+/// Access to interface information on Linux is via netlink, a socket-based
+/// method for transferring information between the kernel and user processes.
+///
+/// For detailed information about netlink interface, please refer to
+/// http://en.wikipedia.org/wiki/Netlink and RFC3549. Comments in the
+/// detectIfaces() method (towards the end of this file) provide an overview
+/// on how the netlink interface is used here.
+///
+/// Note that this interface is very robust and allows many operations:
+/// add/get/set/delete links, addresses, routes, queuing, manipulation of
+/// traffic classes, manipulation of neighbourhood tables and even the ability
+/// to do something with address labels. Getting a list of interfaces with
+/// addresses configured on it is just a small subset of all possible actions.
+
+#include <config.h>
+
+#if defined(OS_LINUX)
+
+#include <dhcp/iface_mgr.h>
+#include <exceptions/exceptions.h>
+
+#include <stdint.h>
+#include <net/if.h>
+#include <linux/rtnetlink.h>
+#include <boost/array.hpp>
+#include <boost/static_assert.hpp>
+#include <dhcp/iface_mgr.h>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <util/io/sockaddr_util.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+using namespace isc::util::io::internal;
+
+BOOST_STATIC_ASSERT(IFLA_MAX>=IFA_MAX);
+
+namespace {
+
+/// @brief This class offers utility methods for netlink connection.
+///
+/// See IfaceMgr::detectIfaces() (Linux implementation, towards the end of this
+/// file) for example usage.
+class Netlink
+{
+public:
+
+/// @brief Holds pointers to netlink messages.
+///
+/// netlink (a Linux interface for getting information about network
+/// interfaces) uses memory aliasing. The Linux kernel returns a memory
+/// blob that should be interpreted as a series of nlmessages. There
+/// are different nlmsg structures defined with varying sizes. They
+/// have one thing in common - the initial fields are laid out in the same
+/// way as in nlmsghdr. Therefore different messages can be represented
+/// as an nlmsghdr followed by a variable number of bytes that are
+/// message-specific. The only reasonable way to represent this in
+/// C++ is to use a vector of pointers to nlmsghdr (the common structure).
+ typedef vector<nlmsghdr*> NetlinkMessages;
+
+/// @brief Holds pointers to interface or address attributes.
+///
+/// Note that to get address info, a shorter (IFA_MAX rather than IFLA_MAX)
+/// table could be used, but we will use the bigger one anyway to
+/// make the code reusable.
+///
+/// rtattr is a generic structure, similar to sockaddr. It is defined
+/// in linux/rtnetlink.h and shown here for documentation purposes only:
+///
+/// struct rtattr {
+///     unsigned short rta_len;
+///     unsigned short rta_type;
+/// };
+ typedef boost::array<struct rtattr*, IFLA_MAX + 1> RTattribPtrs;
+
+ Netlink() : fd_(-1), seq_(0), dump_(0) {
+ memset(&local_, 0, sizeof(struct sockaddr_nl));
+ memset(&peer_, 0, sizeof(struct sockaddr_nl));
+ }
+
+ ~Netlink() {
+ rtnl_close_socket();
+ }
+
+
+ void rtnl_open_socket();
+ void rtnl_send_request(int family, int type);
+ void rtnl_store_reply(NetlinkMessages& storage, const nlmsghdr* msg);
+ void parse_rtattr(RTattribPtrs& table, rtattr* rta, int len);
+ void ipaddrs_get(IfaceMgr::Iface& iface, NetlinkMessages& addr_info);
+ void rtnl_process_reply(NetlinkMessages& info);
+ void release_list(NetlinkMessages& messages);
+ void rtnl_close_socket();
+
+private:
+ int fd_; // Netlink file descriptor
+ sockaddr_nl local_; // Local addresses
+ sockaddr_nl peer_; // Remote address
+ uint32_t seq_; // Counter used for generating unique sequence numbers
+ uint32_t dump_; // Sequence number of the expected reply (used for matching)
+};
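
As an illustration (not from this patch), the "memory aliasing" described for NetlinkMessages above is the standard nlmsghdr walk; rtnl_process_reply() below does it for real, the bare pattern being:

    #include <linux/netlink.h>

    // Visit every message embedded in a blob of 'len' bytes read from a
    // netlink socket; each record starts with the common nlmsghdr prefix.
    void walkNetlinkBlob(char* buf, int len) {
        for (nlmsghdr* hdr = reinterpret_cast<nlmsghdr*>(buf);
             NLMSG_OK(hdr, len);
             hdr = NLMSG_NEXT(hdr, len)) {
            // hdr->nlmsg_type says how to interpret NLMSG_DATA(hdr),
            // e.g. RTM_NEWLINK -> ifinfomsg, RTM_NEWADDR -> ifaddrmsg.
        }
    }
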
+
+/// @brief defines a size of a sent netlink buffer
+const static size_t SNDBUF_SIZE = 32768;
+
+/// @brief defines a size of a received netlink buffer
+const static size_t RCVBUF_SIZE = 32768;
+
+/// @brief Opens netlink socket and initializes handle structure.
+///
+/// @exception Unexpected Thrown if socket configuration fails.
+void Netlink::rtnl_open_socket() {
+
+ fd_ = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (fd_ < 0) {
+ isc_throw(Unexpected, "Failed to create NETLINK socket.");
+ }
+
+ if (setsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &SNDBUF_SIZE, sizeof(SNDBUF_SIZE)) < 0) {
+ isc_throw(Unexpected, "Failed to set send buffer in NETLINK socket.");
+ }
+
+ if (setsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &RCVBUF_SIZE, sizeof(RCVBUF_SIZE)) < 0) {
+ isc_throw(Unexpected, "Failed to set receive buffer in NETLINK socket.");
+ }
+
+ local_.nl_family = AF_NETLINK;
+ local_.nl_groups = 0;
+
+ if (bind(fd_, convertSockAddr(&local_), sizeof(local_)) < 0) {
+ isc_throw(Unexpected, "Failed to bind netlink socket.");
+ }
+
+ socklen_t addr_len = sizeof(local_);
+ if (getsockname(fd_, convertSockAddr(&local_), &addr_len) < 0) {
+ isc_throw(Unexpected, "Getsockname for netlink socket failed.");
+ }
+
+ // just 2 sanity checks and we are done
+ if ( (addr_len != sizeof(local_)) ||
+ (local_.nl_family != AF_NETLINK) ) {
+ isc_throw(Unexpected, "getsockname() returned unexpected data for netlink socket.");
+ }
+}
+
+/// @brief Closes netlink communication socket
+void Netlink::rtnl_close_socket() {
+ if (fd_ != -1) {
+ close(fd_);
+ }
+ fd_ = -1;
+}
+
+/// @brief Sends request over NETLINK socket.
+///
+/// @param family requested information family.
+/// @param type request type (RTM_GETLINK or RTM_GETADDR).
+void Netlink::rtnl_send_request(int family, int type) {
+ struct Req {
+ nlmsghdr netlink_header;
+ rtgenmsg generic;
+ };
+ Req req; // we need this type named for offsetof() used in assert
+ struct sockaddr_nl nladdr;
+
+ // do a sanity check. Verify that Req structure is aligned properly
+ BOOST_STATIC_ASSERT(sizeof(nlmsghdr) == offsetof(Req, generic));
+
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+
+ // According to the netlink(7) manpage, nlmsg_seq must be set to a sequence
+ // number and is used to track messages. That is just a value that is
+ // opaque to the kernel, and user-space code is supposed to use it to match
+ // incoming responses to sent requests. That is not really useful as we
+ // send a single request and get a single response at a time. However, we
+ // obey the man page suggestion and just set this to monotonically
+ // increasing numbers.
+ seq_++;
+
+ // This will be used to find the correct response (responses
+ // sent by the kernel are supposed to have the same sequence number
+ // as the request we sent).
+ dump_ = seq_;
+
+ memset(&req, 0, sizeof(req));
+ req.netlink_header.nlmsg_len = sizeof(req);
+ req.netlink_header.nlmsg_type = type;
+ req.netlink_header.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
+ req.netlink_header.nlmsg_pid = 0;
+ req.netlink_header.nlmsg_seq = seq_;
+ req.generic.rtgen_family = family;
+
+ int status = sendto(fd_, static_cast<void*>(&req), sizeof(req), 0,
+ static_cast<struct sockaddr*>(static_cast<void*>(&nladdr)),
+ sizeof(nladdr));
+
+ if (status<0) {
+ isc_throw(Unexpected, "Failed to send " << sizeof(nladdr)
+ << " bytes over netlink socket.");
+ }
+}
+
+/// @brief Appends an nlmsg to storage.
+///
+/// This method copies the pointed-to nlmsg to newly allocated memory
+/// and adds it to storage.
+///
+/// @param storage A vector that holds pointers to netlink messages. The caller
+/// is responsible for freeing the pointed-to messages.
+/// @param msg A netlink message to be added.
+void Netlink::rtnl_store_reply(NetlinkMessages& storage, const struct nlmsghdr *msg)
+{
+ // we need to make a copy of this message. We really can't allocate
+ // nlmsghdr directly as it is only part of the structure. There are
+ // many message types with varying lengths and a common header.
+ struct nlmsghdr* copy = reinterpret_cast<struct nlmsghdr*>(new char[msg->nlmsg_len]);
+ memcpy(copy, msg, msg->nlmsg_len);
+
+ // push_back copies only pointer content, not the pointed-to object.
+ storage.push_back(copy);
+}
+
+/// @brief Parses rtattr message.
+///
+/// Some netlink messages represent address information. Such messages
+/// are concatenated collections of rtattr structures. This function
+/// iterates over that list and stores pointers to those structures in
+/// a flat array (table).
+///
+/// @param table table where pointers to the rtattr structures will be stored
+/// @param rta Pointer to first rtattr object
+/// @param len Length (in bytes) of concatenated rtattr list.
+void Netlink::parse_rtattr(RTattribPtrs& table, struct rtattr* rta, int len)
+{
+ std::fill(table.begin(), table.end(), static_cast<struct rtattr*>(NULL));
+ // RTA_OK and RTA_NEXT() are macros defined in linux/rtnetlink.h
+ // they are used to handle rtattributes. RTA_OK checks if the structure
+ // pointed by rta is reasonable and passes all sanity checks.
+ // RTA_NEXT() returns pointer to the next rtattr structure that
+ // immediately follows pointed rta structure. See aforementioned
+ // header for details.
+ while (RTA_OK(rta, len)) {
+ if (rta->rta_type < table.size()) {
+ table[rta->rta_type] = rta;
+ }
+ rta = RTA_NEXT(rta,len);
+ }
+ if (len) {
+ isc_throw(Unexpected, "Failed to parse RTATTR in netlink message.");
+ }
+}
+
+/// @brief Parses addr_info and appends appropriate addresses to Iface object.
+///
+/// Netlink is a fine, but convoluted interface. It returns a concatenated
+/// collection of netlink messages. Some of those messages convey information
+/// about addresses. Those messages are in fact appropriate header followed
+/// by concatenated lists of rtattr structures that define various pieces
+/// of address information.
+///
+/// @param iface interface representation (addresses will be added here)
+/// @param addr_info collection of parsed netlink messages
+void Netlink::ipaddrs_get(IfaceMgr::Iface& iface, NetlinkMessages& addr_info) {
+ uint8_t addr[V6ADDRESS_LEN];
+ RTattribPtrs rta_tb;
+
+ for (NetlinkMessages::const_iterator msg = addr_info.begin();
+ msg != addr_info.end(); ++msg) {
+ ifaddrmsg* ifa = static_cast<ifaddrmsg*>(NLMSG_DATA(*msg));
+
+ // These are not the addresses you are looking for
+ if (ifa->ifa_index != iface.getIndex()) {
+ continue;
+ }
+
+ if ((ifa->ifa_family == AF_INET6) || (ifa->ifa_family == AF_INET)) {
+ std::fill(rta_tb.begin(), rta_tb.end(), static_cast<rtattr*>(NULL));
+ parse_rtattr(rta_tb, IFA_RTA(ifa), (*msg)->nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
+ if (!rta_tb[IFA_LOCAL]) {
+ rta_tb[IFA_LOCAL] = rta_tb[IFA_ADDRESS];
+ }
+ if (!rta_tb[IFA_ADDRESS]) {
+ rta_tb[IFA_ADDRESS] = rta_tb[IFA_LOCAL];
+ }
+
+ memcpy(addr, RTA_DATA(rta_tb[IFLA_ADDRESS]),
+ ifa->ifa_family==AF_INET?V4ADDRESS_LEN:V6ADDRESS_LEN);
+ IOAddress a = IOAddress::from_bytes(ifa->ifa_family, addr);
+ iface.addAddress(a);
+
+ /// TODO: Read lifetimes of configured IPv6 addresses
+ }
+ }
+}
+
+/// @brief Processes reply received over netlink socket.
+///
+/// This method parses the received buffer (a collection of concatenated
+/// netlink messages), copies each received message to newly allocated
+/// memory and stores pointers to it in the "info" container.
+///
+/// @param info received netlink messages will be stored here. It is the
+/// caller's responsibility to release the memory associated with the
+/// messages by calling the release_list() method.
+void Netlink::rtnl_process_reply(NetlinkMessages& info) {
+ sockaddr_nl nladdr;
+ iovec iov;
+ msghdr msg;
+ memset(&msg, 0, sizeof(msghdr));
+ msg.msg_name = &nladdr;
+ msg.msg_namelen = sizeof(nladdr);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ char buf[RCVBUF_SIZE];
+
+ iov.iov_base = buf;
+ iov.iov_len = sizeof(buf);
+ while (true) {
+ int status = recvmsg(fd_, &msg, 0);
+
+ if (status < 0) {
+ if (errno == EINTR) {
+ continue;
+ }
+ isc_throw(Unexpected, "Error " << errno
+ << " while processing reply from netlink socket.");
+ }
+
+ if (status == 0) {
+ isc_throw(Unexpected, "EOF while reading netlink socket.");
+ }
+
+ nlmsghdr* header = static_cast<nlmsghdr*>(static_cast<void*>(buf));
+ while (NLMSG_OK(header, status)) {
+
+ // Received a message not addressed to our process, or not
+ // with a sequence number we are expecting. Ignore, and
+ // look at the next one.
+ if (nladdr.nl_pid != 0 ||
+ header->nlmsg_pid != local_.nl_pid ||
+ header->nlmsg_seq != dump_) {
+ header = NLMSG_NEXT(header, status);
+ continue;
+ }
+
+ if (header->nlmsg_type == NLMSG_DONE) {
+ // End of message.
+ return;
+ }
+
+ if (header->nlmsg_type == NLMSG_ERROR) {
+ nlmsgerr* err = static_cast<nlmsgerr*>(NLMSG_DATA(header));
+ if (header->nlmsg_len < NLMSG_LENGTH(sizeof(struct nlmsgerr))) {
+ // We are really out of luck here. We can't even say what is
+ // wrong as error message is truncated. D'oh.
+ isc_throw(Unexpected, "Netlink reply read failed.");
+ } else {
+ isc_throw(Unexpected, "Netlink reply read error " << -err->error);
+ }
+ // Never reached: we throw before we get here.
+ return;
+ }
+
+ // store the data
+ rtnl_store_reply(info, header);
+
+ header = NLMSG_NEXT(header, status);
+ }
+ if (msg.msg_flags & MSG_TRUNC) {
+ isc_throw(Unexpected, "Message received over netlink truncated.");
+ }
+ if (status) {
+ isc_throw(Unexpected, "Trailing garbage of " << status << " bytes received over netlink.");
+ }
+ }
+}
+
+/// @brief releases nlmsg structures
+///
+/// @param messages Set of messages to be freed.
+void Netlink::release_list(NetlinkMessages& messages) {
+ // Let's free the local copies of the stored messages. They were allocated
+ // as char[] in rtnl_store_reply(), so release them the same way.
+ for (NetlinkMessages::iterator msg = messages.begin(); msg != messages.end(); ++msg) {
+ delete[] reinterpret_cast<char*>(*msg);
+ }
+
+ // and get rid of the message pointers as well
+ messages.clear();
+}
+
+} // end of anonymous namespace
+
+namespace isc {
+namespace dhcp {
+
+/// @brief Detect available interfaces on Linux systems.
+///
+/// Uses the socket-based netlink protocol to retrieve the list of interfaces
+/// from the Linux kernel.
+void IfaceMgr::detectIfaces() {
+ cout << "Linux: detecting interfaces." << endl;
+
+ // Copies of netlink messages about links will be stored here.
+ Netlink::NetlinkMessages link_info;
+
+ // Copies of netlink messages about addresses will be stored here.
+ Netlink::NetlinkMessages addr_info;
+
+ // Socket descriptors and other rtnl-related parameters.
+ Netlink nl;
+
+ // Table with pointers to address attributes.
+ Netlink::RTattribPtrs attribs_table;
+ std::fill(attribs_table.begin(), attribs_table.end(),
+ static_cast<struct rtattr*>(NULL));
+
+ // Open socket
+ nl.rtnl_open_socket();
+
+ // Now that we have an open, functional socket, let's use it!
+ // Ask for list of network interfaces...
+ nl.rtnl_send_request(AF_PACKET, RTM_GETLINK);
+
+ // Get reply and store it in link_info list:
+ // the response is received as with any other socket - just a series
+ // of bytes that represents a collection of netlink messages
+ // concatenated together. rtnl_process_reply will parse this
+ // buffer, copy each message to newly allocated memory and
+ // store pointers to it in link_info. This allocated memory will
+ // be released later. See release_list(link_info) below.
+ nl.rtnl_process_reply(link_info);
+
+ // Now ask for list of addresses (AF_UNSPEC = of any family)
+ // Let's repeat, but this time ask for any addresses.
+ // That includes IPv4, IPv6 and any other address families that
+ // happen to be supported by this system.
+ nl.rtnl_send_request(AF_UNSPEC, RTM_GETADDR);
+
+ // Get reply and store it in addr_info list.
+ // Again, we will allocate new memory and store messages in
+ // addr_info. It will be released later using release_list(addr_info).
+ nl.rtnl_process_reply(addr_info);
+
+ // Now build list with interface names
+ for (Netlink::NetlinkMessages::iterator msg = link_info.begin();
+ msg != link_info.end(); ++msg) {
+ // Required to display information about interface
+ struct ifinfomsg* interface_info = static_cast<ifinfomsg*>(NLMSG_DATA(*msg));
+ int len = (*msg)->nlmsg_len;
+ len -= NLMSG_LENGTH(sizeof(*interface_info));
+ nl.parse_rtattr(attribs_table, IFLA_RTA(interface_info), len);
+
+ // valgrind reports *possible* memory leak in the line below, but it is
+ // bogus. Nevertheless, the whole interface definition has been split
+ // into three separate steps for easier debugging.
+ const char* tmp = static_cast<const char*>(RTA_DATA(attribs_table[IFLA_IFNAME]));
+ string iface_name(tmp); // <--- bogus valgrind warning here
+ Iface iface = Iface(iface_name, interface_info->ifi_index);
+
+ iface.setHWType(interface_info->ifi_type);
+ iface.setFlags(interface_info->ifi_flags);
+
+ // Does the interface have an LL_ADDR?
+ if (attribs_table[IFLA_ADDRESS]) {
+ iface.setMac(static_cast<const uint8_t*>(RTA_DATA(attribs_table[IFLA_ADDRESS])),
+ RTA_PAYLOAD(attribs_table[IFLA_ADDRESS]));
+ }
+ else {
+ // Tunnels may have no LL_ADDR. RTA_PAYLOAD() does not check for
+ // NULL and would try to dereference it, so we skip setting the
+ // MAC address in that case.
+ }
+
+ nl.ipaddrs_get(iface, addr_info);
+ ifaces_.push_back(iface);
+ }
+
+ nl.release_list(link_info);
+ nl.release_list(addr_info);
+
+ printIfaces();
+}
+
+/// @brief sets flag_*_ fields.
+///
+/// This implementation is OS-specific as bits have different meaning
+/// on different OSes.
+///
+/// @param flags flags bitfield read from OS
+void IfaceMgr::Iface::setFlags(uint32_t flags) {
+ flags_ = flags;
+
+ flag_loopback_ = flags & IFF_LOOPBACK;
+ flag_up_ = flags & IFF_UP;
+ flag_running_ = flags & IFF_RUNNING;
+ flag_multicast_ = flags & IFF_MULTICAST;
+ flag_broadcast_ = flags & IFF_BROADCAST;
+}
+
+void IfaceMgr::os_send4(struct msghdr& m, boost::scoped_array<char>& control_buf,
+ size_t control_buf_len, const Pkt4Ptr& pkt) {
+
+ // Setting the interface is a bit more involved.
+ //
+ // We have to create a "control message", and set that to
+ // define the IPv4 packet information. We could set the
+ // source address if we wanted, but we can safely let the
+ // kernel decide what that should be.
+ m.msg_control = &control_buf[0];
+ m.msg_controllen = control_buf_len;
+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&m);
+ cmsg->cmsg_level = IPPROTO_IP;
+ cmsg->cmsg_type = IP_PKTINFO;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
+ struct in_pktinfo* pktinfo =(struct in_pktinfo *)CMSG_DATA(cmsg);
+ memset(pktinfo, 0, sizeof(struct in_pktinfo));
+ pktinfo->ipi_ifindex = pkt->getIndex();
+ m.msg_controllen = cmsg->cmsg_len;
+}
+
+bool IfaceMgr::os_receive4(struct msghdr& m, Pkt4Ptr& pkt) {
+ struct cmsghdr* cmsg;
+ struct in_pktinfo* pktinfo;
+ struct in_addr to_addr;
+
+ memset(&to_addr, 0, sizeof(to_addr));
+
+ cmsg = CMSG_FIRSTHDR(&m);
+ while (cmsg != NULL) {
+ if ((cmsg->cmsg_level == IPPROTO_IP) &&
+ (cmsg->cmsg_type == IP_PKTINFO)) {
+ pktinfo = (struct in_pktinfo*)CMSG_DATA(cmsg);
+
+ pkt->setIndex(pktinfo->ipi_ifindex);
+ pkt->setLocalAddr(IOAddress(htonl(pktinfo->ipi_addr.s_addr)));
+ return (true);
+
+ // This field is useful, when we are bound to unicast
+ // address e.g. 192.0.2.1 and the packet was sent to
+ // broadcast. This will return broadcast address, not
+ // the address we are bound to.
+
+ // IOAddress tmp(htonl(pktinfo->ipi_spec_dst.s_addr));
+ // cout << "The other addr is: " << tmp.toText() << endl;
+
+ // Perhaps we should uncomment this:
+ // to_addr = pktinfo->ipi_spec_dst;
+ }
+ cmsg = CMSG_NXTHDR(&m, cmsg);
+ }
+
+ return (false);
+}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
+
+#endif // if defined(OS_LINUX)
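
One caveat worth spelling out (illustration, not from this patch): the IP_PKTINFO control message consumed by os_receive4() above is only delivered if the option was enabled when the IPv4 socket was opened, presumably in openSocket4(), whose body is outside the lines quoted here. The prerequisite call boils down to:

    #include <netinet/in.h>
    #include <sys/socket.h>

    // Enable per-datagram packet info on a Linux IPv4 UDP socket so that
    // recvmsg() returns an IP_PKTINFO control message (destination address
    // and interface index) alongside the payload.
    bool enablePktInfo4(int sock) {
        int on = 1;
        return (setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on)) == 0);
    }
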
diff --git a/src/lib/dhcp/iface_mgr_sun.cc b/src/lib/dhcp/iface_mgr_sun.cc
new file mode 100644
index 0000000..5847906
--- /dev/null
+++ b/src/lib/dhcp/iface_mgr_sun.cc
@@ -0,0 +1,55 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#if defined(OS_SUN)
+
+#include <dhcp/iface_mgr.h>
+#include <exceptions/exceptions.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+namespace isc {
+namespace dhcp {
+
+void
+IfaceMgr::detectIfaces() {
+ /// @todo do the actual detection on Solaris. Currently just calling
+ /// stub implementation.
+ stubDetectIfaces();
+}
+
+void IfaceMgr::os_send4(struct msghdr& /*m*/,
+ boost::scoped_array<char>& /*control_buf*/,
+ size_t /*control_buf_len*/,
+ const Pkt4Ptr& /*pkt*/) {
+ // @todo: Are there any specific actions required before sending IPv4 packet
+ // on Solaris? See iface_mgr_linux.cc for a working Linux implementation.
+}
+
+bool IfaceMgr::os_receive4(struct msghdr& /*m*/, Pkt4Ptr& /*pkt*/) {
+ // @todo: Are there any specific actions required when receiving IPv4 packet
+ // on Solaris? See iface_mgr_linux.cc for a working Linux implementation.
+
+ return (true); // pretend that we have everything set up for reception.
+}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
+
+#endif
diff --git a/src/lib/dhcp/libdhcp++.cc b/src/lib/dhcp/libdhcp++.cc
new file mode 100644
index 0000000..2cfea13
--- /dev/null
+++ b/src/lib/dhcp/libdhcp++.cc
@@ -0,0 +1,187 @@
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <boost/shared_array.hpp>
+#include <boost/shared_ptr.hpp>
+#include <util/buffer.h>
+#include <dhcp/libdhcp++.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+// static container with factories for DHCPv4 options
+std::map<unsigned short, Option::Factory*> LibDHCP::v4factories_;
+
+// static container with factories for DHCPv6 options
+std::map<unsigned short, Option::Factory*> LibDHCP::v6factories_;
+
+
+size_t LibDHCP::unpackOptions6(const OptionBuffer& buf,
+ isc::dhcp::Option::OptionCollection& options) {
+ size_t offset = 0;
+ size_t end = buf.size();
+
+ while (offset +4 <= end) {
+ uint16_t opt_type = buf[offset] * 256 + buf[offset + 1];
+ offset += 2;
+ uint16_t opt_len = buf[offset] * 256 + buf[offset + 1];
+ offset += 2;
+
+ if (offset + opt_len > end) {
+ cout << "Option " << opt_type << " truncated." << endl;
+ return (offset);
+ }
+ OptionPtr opt;
+ switch (opt_type) {
+ case D6O_IA_NA:
+ case D6O_IA_PD:
+ // cout << "Creating Option6IA" << endl;
+ opt = OptionPtr(new Option6IA(opt_type,
+ buf.begin() + offset,
+ buf.begin() + offset + opt_len));
+ break;
+ case D6O_IAADDR:
+ // cout << "Creating Option6IAAddr" << endl;
+ opt = OptionPtr(new Option6IAAddr(opt_type,
+ buf.begin() + offset,
+ buf.begin() + offset + opt_len));
+ break;
+ default:
+ // cout << "Creating Option" << endl;
+ opt = OptionPtr(new Option(Option::V6,
+ opt_type,
+ buf.begin() + offset,
+ buf.begin() + offset + opt_len));
+ break;
+ }
+ // add option to options
+ options.insert(std::make_pair(opt_type, opt));
+ offset += opt_len;
+ }
+
+ return (offset);
+}
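
As a worked illustration (not from this patch), here is the DHCPv6 option wire format that unpackOptions6() parses, using the Elapsed Time option (code 8) as an example:

    bytes:   00 08   00 02   00 00
             \___/   \___/   \___/
             code    length  2-byte payload (hundredths of a second)

    unpackOptions6() computes opt_type = 0*256 + 8 = 8 and
    opt_len = 0*256 + 2 = 2, then hands buf.begin()+4 .. buf.begin()+6
    to the Option constructor and advances offset past the payload.
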
+
+size_t LibDHCP::unpackOptions4(const OptionBuffer& buf,
+ isc::dhcp::Option::OptionCollection& options) {
+ size_t offset = 0;
+
+ // DHCPv4 options have a 1-byte code and (except for PAD and END) a 1-byte length
+ while (offset + 1 <= buf.size()) {
+ uint8_t opt_type = buf[offset++];
+
+ // DHO_END is a special, one octet long option
+ if (opt_type == DHO_END)
+ return (offset); // just return. Don't need to add DHO_END option
+
+ // DHO_PAD is just a padding after DHO_END. Let's continue parsing
+ // in case we receive a message without DHO_END.
+ if (opt_type == DHO_PAD)
+ continue;
+
+ if (offset + 1 >= buf.size()) {
+ isc_throw(OutOfRange, "Attempt to parse truncated option "
+ << opt_type);
+ }
+
+ uint8_t opt_len = buf[offset++];
+ if (offset + opt_len > buf.size()) {
+ isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+ << offset + opt_len << " bytes from " << buf.size()
+ << "-byte long buffer.");
+ }
+
+ OptionPtr opt;
+ switch(opt_type) {
+ default:
+ opt = OptionPtr(new Option(Option::V4, opt_type,
+ buf.begin()+offset,
+ buf.begin()+offset+opt_len));
+ }
+
+ options.insert(std::make_pair(opt_type, opt));
+ offset += opt_len;
+ }
+ return (offset);
+}
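
For comparison (again illustrative, not from this patch), the DHCPv4 layout handled by unpackOptions4() uses one-octet codes and lengths, with PAD (0) and END (255) as the single-octet special cases:

    bytes:   35 01 05 ff
             |  |  |  \-- DHO_END (255): parsing stops here, no Option is created
             |  |  \----- payload: DHCP message type 5 (DHCPACK)
             |  \-------- length = 1
             \----------- option code 53 (DHCP message type)
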
+
+void LibDHCP::packOptions6(isc::util::OutputBuffer &buf,
+ const isc::dhcp::Option::OptionCollection& options) {
+ try {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end(); ++it) {
+ it->second->pack(buf);
+ }
+ }
+ catch (const Exception&) {
+ cout << "Packet build failed (Option build failed)." << endl;
+ throw;
+ }
+}
+
+void
+LibDHCP::packOptions(isc::util::OutputBuffer& buf,
+ const Option::OptionCollection& options) {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end(); ++it) {
+ it->second->pack4(buf);
+ }
+}
+
+void LibDHCP::OptionFactoryRegister(Option::Universe u,
+ uint16_t opt_type,
+ Option::Factory* factory) {
+ switch (u) {
+ case Option::V6: {
+ if (v6factories_.find(opt_type) != v6factories_.end()) {
+ isc_throw(BadValue, "There is already DHCPv6 factory registered "
+ << "for option type " << opt_type);
+ }
+ v6factories_[opt_type]=factory;
+ return;
+ }
+ case Option::V4:
+ {
+ // Option 0 is the special one-octet-long PAD option (value 0). It is never
+ // instantiated as an Option object, but rather consumed during packet parsing.
+ if (opt_type == 0) {
+ isc_throw(BadValue, "Cannot redefine PAD option (code=0)");
+ }
+ // Option 255 is never instantiated as an Option object either. It is the
+ // special one-octet END option (value 255) that is added at the end of all
+ // options during packet assembly and silently consumed during packet parsing.
+ if (opt_type > 254) {
+ isc_throw(BadValue, "Too big option type for DHCPv4, only 0-254 allowed.");
+ }
+ if (v4factories_.find(opt_type)!=v4factories_.end()) {
+ isc_throw(BadValue, "There is already DHCPv4 factory registered "
+ << "for option type " << opt_type);
+ }
+ v4factories_[opt_type]=factory;
+ return;
+ }
+ default:
+ isc_throw(BadValue, "Invalid universe type specified.");
+ }
+
+ return;
+}
diff --git a/src/lib/dhcp/libdhcp++.h b/src/lib/dhcp/libdhcp++.h
new file mode 100644
index 0000000..c7935c8
--- /dev/null
+++ b/src/lib/dhcp/libdhcp++.h
@@ -0,0 +1,96 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LIBDHCP_H_
+#define LIBDHCP_H_
+
+#include <iostream>
+#include <util/buffer.h>
+#include <dhcp/pkt6.h>
+
+namespace isc {
+namespace dhcp {
+
+class LibDHCP {
+
+public:
+ /// Builds collection of options.
+ ///
+ /// Builds raw (on-wire) data for provided collection of options.
+ ///
+ /// @param buf output buffer (assembled options will be stored here)
+ /// @param options collection of options to be stored
+ static void packOptions6(isc::util::OutputBuffer& buf,
+ const isc::dhcp::Option::OptionCollection& options);
+
+
+ /// @brief Stores options in a buffer.
+ ///
+ /// Stores all options defined in the options container in an on-wire
+ /// format in output buffer specified by buf.
+ ///
+ /// May throw different exceptions if option assembly fails. There
+ /// may be different reasons (option too large, option malformed,
+ /// too many options etc.)
+ ///
+ /// @param buf output buffer (assembled options will be stored here)
+ /// @param options collection of options to be stored
+ static void packOptions(isc::util::OutputBuffer& buf,
+ const isc::dhcp::Option::OptionCollection& options);
+
+ /// @brief Parses provided buffer as DHCPv4 options and creates Option objects.
+ ///
+ /// Parses provided buffer and stores created Option objects
+ /// in options container.
+ ///
+ /// @param buf Buffer to be parsed.
+ /// @param options Reference to option container. Options will be
+ /// put here.
+ static size_t unpackOptions4(const OptionBuffer& buf,
+ isc::dhcp::Option::OptionCollection& options);
+
+ /// @brief Parses provided buffer as DHCPv6 options and creates Option objects.
+ ///
+ /// Parses provided buffer and stores created Option objects
+ /// in options container.
+ ///
+ /// @param buf Buffer to be parsed.
+ /// @param options Reference to option container. Options will be
+ /// put here.
+ static size_t unpackOptions6(const OptionBuffer& buf,
+ isc::dhcp::Option::OptionCollection& options);
+
+ /// Registers factory method that produces options of specific option types.
+ ///
+ /// @exception BadValue if the provided type is already registered, has too
+ /// large a value, or an invalid universe is specified
+ ///
+ /// @param u universe of the option (V4 or V6)
+ /// @param type option-type
+ /// @param factory function pointer
+ static void OptionFactoryRegister(Option::Universe u,
+ uint16_t type,
+ Option::Factory * factory);
+protected:
+ /// pointers to factories that produce DHCPv4 options
+ static std::map<unsigned short, Option::Factory*> v4factories_;
+
+ /// pointers to factories that produce DHCPv6 options
+ static std::map<unsigned short, Option::Factory*> v6factories_;
+};
+
+}
+}
+
+#endif
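
As an illustration (not from this patch), a minimal round trip through the interface declared above might look like this; roundTripOptions6() is a made-up name and the wire buffer is assumed to hold valid DHCPv6 options:

    #include <dhcp/libdhcp++.h>
    #include <dhcp/option.h>
    #include <util/buffer.h>

    using namespace isc::dhcp;
    using namespace isc::util;

    // Parse a DHCPv6 options blob and re-assemble it into on-wire form.
    void roundTripOptions6(const OptionBuffer& wire) {
        Option::OptionCollection options;
        LibDHCP::unpackOptions6(wire, options);     // fills 'options', keyed by option code

        OutputBuffer out(wire.size());
        LibDHCP::packOptions6(out, options);        // writes the options back to 'out'
    }
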
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
deleted file mode 100644
index e7154d9..0000000
--- a/src/lib/dhcp/libdhcp.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include "config.h"
-
-#include <boost/shared_array.hpp>
-#include <boost/shared_ptr.hpp>
-#include "dhcp/libdhcp.h"
-#include "dhcp/dhcp6.h"
-
-#include "dhcp/option.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/option6_iaaddr.h"
-
-using namespace std;
-using namespace isc::dhcp;
-
-// static array with factories for options
-std::map<unsigned short, Option::Factory*> LibDHCP::v6factories_;
-
-unsigned int
-LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
- unsigned int buf_len,
- unsigned int offset, unsigned int parse_len,
- isc::dhcp::Option::Option6Collection& options) {
- if (offset + parse_len > buf_len) {
- isc_throw(OutOfRange, "Option parse failed. Tried to parse "
- << parse_len << " bytes at offset " << offset
- << ": out of buffer");
- }
- unsigned int end = offset + parse_len;
-
- while (offset +4 <= end) {
- uint16_t opt_type = buf[offset]*256 + buf[offset+1];
- offset += 2;
- uint16_t opt_len = buf[offset]*256 + buf[offset+1];
- offset += 2;
-
- if (offset + opt_len > end ) {
- cout << "Option " << opt_type << " truncated." << endl;
- return (offset);
- }
- boost::shared_ptr<Option> opt;
- switch (opt_type) {
- case D6O_IA_NA:
- case D6O_IA_PD:
- // cout << "Creating Option6IA" << endl;
- opt = boost::shared_ptr<Option>(new Option6IA(opt_type,
- buf, buf_len,
- offset,
- opt_len));
- break;
- case D6O_IAADDR:
- // cout << "Creating Option6IAAddr" << endl;
- opt = boost::shared_ptr<Option>(new Option6IAAddr(opt_type,
- buf, buf_len,
- offset, opt_len));
- break;
- default:
- // cout << "Creating Option" << endl;
- opt = boost::shared_ptr<Option>(new Option(Option::V6,
- opt_type,
- buf,
- offset,
- opt_len));
- break;
- }
- // add option to options
- options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
- offset += opt_len;
- }
-
- return (offset);
-}
-
-unsigned int
-LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
- unsigned int data_len,
- unsigned int offset,
- const isc::dhcp::Option::Option6Collection& options) {
- try {
- for (isc::dhcp::Option::Option6Collection::const_iterator it = options.begin();
- it != options.end();
- ++it) {
- unsigned short opt_len = (*it).second->len();
- if (offset + opt_len > data_len) {
- isc_throw(OutOfRange, "Failed to build option " <<
- (*it).first << ": out of buffer");
- }
- offset = (*it).second->pack(data, data_len, offset);
- }
- }
- catch (const Exception&) {
- cout << "Packet build failed (Option build failed)." << endl;
- throw;
- }
- return (offset);
-}
-
-bool
-LibDHCP::OptionFactoryRegister(Option::Universe u,
- unsigned short opt_type,
- Option::Factory * factory) {
- switch (u) {
- case Option::V6: {
- if (v6factories_.find(opt_type)!=v6factories_.end()) {
- isc_throw(BadValue, "There is already DHCPv6 factory registered "
- << "for option type " << opt_type);
- }
- v6factories_[opt_type]=factory;
- return true;
- }
- case Option::V4:
- default:{
- isc_throw(BadValue, "This universe type is not supported yet.");
- return false; // never happens
- }
- }
-
-}
diff --git a/src/lib/dhcp/libdhcp.h b/src/lib/dhcp/libdhcp.h
deleted file mode 100644
index c2ac949..0000000
--- a/src/lib/dhcp/libdhcp.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef LIBDHCP_H_
-#define LIBDHCP_H_
-
-#include <iostream>
-#include "dhcp/pkt6.h"
-
-namespace isc {
-namespace dhcp {
-
-class LibDHCP {
-
-public:
- /// Builds collection of options.
- ///
- /// Builds raw (on-wire) data for provided collection of options.
- ///
- /// @param buf shared pointer to buffer. Data will be stored there.
- /// @param buf_len buffer length. Used for buffer overflow protection.
- /// @param offset Offset from beginning of the buffer, where store options
- /// @param options collection of options to store to
- ///
- /// @return offset to the first unused byte in buffer (next one after last
- /// used byte)
- ///
- static unsigned int
- packOptions6(boost::shared_array<uint8_t> buf, unsigned int buf_len,
- unsigned int offset,
- const isc::dhcp::Option::Option6Collection& options);
-
- ///
- /// Parses provided buffer and creates Option objects.
- ///
- /// Parses provided buf array and stores created Option objects
- /// in options container.
- ///
- /// @param buf Buffer to be parsed.
- /// @param offset Specifies offset for the first option.
- /// @param options Reference to option container. Options will be
- /// put here.
- ///
- /// @return offset to first byte after last parsed option
- ///
- static unsigned int
- unpackOptions6(const boost::shared_array<uint8_t> buf, unsigned int buf_len,
- unsigned int offset, unsigned int parse_len,
- isc::dhcp::Option::Option6Collection& options_);
-
- ///
- /// Registers factory method that produces options of specific option types.
- ///
- /// @param u universe of the option (V4 or V6)
- /// @param opt_type option-type
- /// @param factory function pointer
- ///
- /// @return true, if registration was successful, false otherwise
- ///
- static bool
- OptionFactoryRegister(Option::Universe u,
- unsigned short type,
- Option::Factory * factory);
-protected:
- // pointers to factories that produce DHCPv6 options
- static std::map<unsigned short, Option::Factory*> v6factories_;
-};
-
-}
-}
-
-#endif
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index b0b30ca..631c994 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -21,145 +21,132 @@
#endif
#include <sstream>
#include <iomanip>
-#include <boost/shared_array.hpp>
#include "exceptions/exceptions.h"
#include "util/io_utilities.h"
#include "dhcp/option.h"
-#include "dhcp/libdhcp.h"
+#include "dhcp/libdhcp++.h"
using namespace std;
-using namespace isc::dhcp;
using namespace isc::util;
-Option::Option(Universe u, unsigned short type)
- :universe_(u), type_(type), data_len_(0) {
+namespace isc {
+namespace dhcp {
+Option::Option(Universe u, uint16_t type)
+ :universe_(u), type_(type) {
+ // END option (type 255 is forbidden as well)
+ if ((u == V4) && ((type == 0) || (type > 254))) {
+ isc_throw(BadValue, "Can't create V4 option of type "
+ << type << ", V4 options are in range 1..254");
+ }
}
-Option::Option(Universe u, unsigned short type,
- const boost::shared_array<uint8_t>& buf,
- unsigned int offset, unsigned int len)
- :universe_(u), type_(type), data_(buf),
- data_len_(len), offset_(offset)
- {
+Option::Option(Universe u, uint16_t type, const OptionBuffer& data)
+ :universe_(u), type_(type), data_(data) {
+ check();
+}
- // sanity checks
- // TODO: universe must be in V4 and V6
+Option::Option(Universe u, uint16_t type, OptionBufferConstIter first,
+ OptionBufferConstIter last)
+ :universe_(u), type_(type), data_(OptionBuffer(first,last)) {
+ check();
}
-unsigned int
-Option::pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- switch (universe_) {
- case V4:
- return pack4(buf, buf_len, offset);
- case V6:
- return pack6(buf, buf_len, offset);
- default:
- isc_throw(BadValue, "Unknown universe defined for Option " << type_);
+void
+Option::check() {
+ if ( (universe_ != V4) && (universe_ != V6) ) {
+ isc_throw(BadValue, "Invalid universe type specified. "
+ << "Only V4 and V6 are allowed.");
}
-}
+ if (universe_ == V4) {
-unsigned int
-Option::pack4(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- if ( offset+len() > buf_len ) {
- isc_throw(OutOfRange, "Failed to pack v4 option=" <<
- type_ << ",len=" << data_len_ << ": too small buffer.");
+ if (type_ > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option type " << type_ << " is too big. "
+ << "For DHCPv4 the allowed type range is 0..255");
+ } else if (data_.size() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
}
- uint8_t *ptr = &buf[offset];
- ptr[0] = type_;
- ptr[1] = data_len_;
- ptr += 2;
- memcpy(ptr, &data_[0], data_len_);
- return offset + len();
+ // no need to check anything for DHCPv6. It allows full range (0-64k) of
+ // both types and data size.
}
-unsigned int
-Option::pack6(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- if ( offset+len() > buf_len ) {
- isc_throw(OutOfRange, "Failed to pack v6 option=" <<
- type_ << ",len=" << len() << ": too small buffer.");
+void Option::pack(isc::util::OutputBuffer& buf) {
+ switch (universe_) {
+ case V6:
+ return (pack6(buf));
+ case V4:
+ return (pack4(buf));
+ default:
+ isc_throw(BadValue, "Failed to pack option " << type_
+ << ": unknown universe type.");
}
+}
- uint8_t * ptr = &buf[offset];
-
- ptr = writeUint16(type_, ptr);
-
- ptr = writeUint16(len() - getHeaderLen(), ptr);
-
- if (data_len_)
- memcpy(ptr, &data_[offset_], data_len_);
+void
+Option::pack4(isc::util::OutputBuffer& buf) {
+ switch (universe_) {
+ case V4: {
+ if (len() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big. "
+ << "At most 255 bytes are supported.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
- // end of fixed part of this option
- offset += OPTION6_HDR_LEN + data_len_;
+ buf.writeUint8(type_);
+ buf.writeUint8(len() - getHeaderLen());
- return LibDHCP::packOptions6(buf, buf_len, offset, options_);
-}
+ buf.writeData(&data_[0], data_.size());
-unsigned int
-Option::unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len) {
- switch (universe_) {
- case V4:
- return unpack4(buf, buf_len, offset, parse_len);
+ LibDHCP::packOptions(buf, options_);
+ return;
+ }
case V6:
- return unpack6(buf, buf_len, offset, parse_len);
+ /// TODO: Do we need a sanity check for option size here?
+ buf.writeUint16(type_);
+ buf.writeUint16(len() - getHeaderLen());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
default:
- isc_throw(BadValue, "Unknown universe defined for Option " << type_);
+ isc_throw(OutOfRange, "Invalid universe type " << universe_);
}
-
- return 0; // should not happen
}
-unsigned int
-Option::unpack4(const boost::shared_array<uint8_t>&,
- unsigned int ,
- unsigned int ,
- unsigned int ) {
- isc_throw(Unexpected, "IPv4 support not implemented yet.");
- return 0;
-}
+void Option::pack6(isc::util::OutputBuffer& buf) {
+ buf.writeUint16(type_);
+ buf.writeUint16(len() - getHeaderLen());
-unsigned int
-Option::unpack6(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len) {
-
- if (buf_len < offset+parse_len) {
- isc_throw(OutOfRange, "Failed to unpack DHCPv6 option len="
- << parse_len << " offset=" << offset
- << " from buffer (length=" << buf_len
- << "): too small buffer.");
+ if (! data_.empty()) {
+ buf.writeData(&data_[0], data_.size());
}
- data_ = buf;
- offset_ = offset;
- data_len_ = buf_len;
+ return LibDHCP::packOptions6(buf, options_);
+}
- return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
- options_);
+void Option::unpack(OptionBufferConstIter begin,
+ OptionBufferConstIter end) {
+ data_ = OptionBuffer(begin, end);
}
-unsigned short
-Option::len() {
+uint16_t Option::len() {
+ // Returns length of the complete option (data length + DHCPv4/DHCPv6
+ // option header)
// length of the whole option is header and data stored in this option...
- int length = getHeaderLen() + data_len_;
+ int length = getHeaderLen() + data_.size();
// ... and sum of lengths of all suboptions
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
@@ -181,26 +168,17 @@ Option::valid() {
return (true);
}
-void
-isc::dhcp::Option::addOption(boost::shared_ptr<isc::dhcp::Option> opt) {
- options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(),
- opt));
-
-}
-
-boost::shared_ptr<isc::dhcp::Option>
-Option::getOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::const_iterator x =
+OptionPtr Option::getOption(uint16_t opt_type) {
+ isc::dhcp::Option::OptionCollection::const_iterator x =
options_.find(opt_type);
if ( x != options_.end() ) {
return (*x).second;
}
- return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+ return OptionPtr(); // NULL
}
-bool
-Option::delOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::iterator x = options_.find(opt_type);
+bool Option::delOption(uint16_t opt_type) {
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(opt_type);
if ( x != options_.end() ) {
options_.erase(x);
return true; // delete successful
@@ -212,43 +190,29 @@ Option::delOption(unsigned short opt_type) {
std::string Option::toText(int indent /* =0 */ ) {
std::stringstream tmp;
- for (int i=0; i<indent; i++)
+ for (int i = 0; i < indent; i++)
tmp << " ";
- tmp << "type=" << type_ << ", len=" << data_len_ << ": ";
+ tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ": ";
- for (unsigned int i=0; i<data_len_; i++) {
+ for (unsigned int i = 0; i < data_.size(); i++) {
if (i) {
tmp << ":";
}
tmp << setfill('0') << setw(2) << hex
- << static_cast<unsigned short>(data_[offset_+i]);
+ << static_cast<unsigned short>(data_[i]);
}
// print suboptions
- for (Option6Collection::const_iterator opt=options_.begin();
- opt!=options_.end();
+ for (OptionCollection::const_iterator opt = options_.begin();
+ opt != options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
}
return tmp.str();
}
-unsigned short
-Option::getType() {
- return type_;
-}
-
-uint8_t*
-Option::getData() {
- if (data_len_) {
- return (&data_[offset_]);
- } else {
- return (NULL);
- }
-}
-
-unsigned short
+uint16_t
Option::getHeaderLen() {
switch (universe_) {
case V4:
@@ -259,6 +223,68 @@ Option::getHeaderLen() {
return 0; // should not happen
}
+void Option::addOption(OptionPtr opt) {
+ if (universe_ == V4) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ }
+ options_.insert(make_pair(opt->getType(), opt));
+}
+
+uint8_t Option::getUint8() {
+ if (data_.size() < sizeof(uint8_t) ) {
+ isc_throw(OutOfRange, "Attempt to read uint8 from option " << type_
+ << " that has size " << data_.size());
+ }
+ return (data_[0]);
+}
+
+uint16_t Option::getUint16() {
+ if (data_.size() < sizeof(uint16_t) ) {
+ isc_throw(OutOfRange, "Attempt to read uint16 from option " << type_
+ << " that has size " << data_.size());
+ }
+
+ return ( readUint16(&data_[0]) );
+}
+
+uint32_t Option::getUint32() {
+ if (data_.size() < sizeof(uint32_t) ) {
+ isc_throw(OutOfRange, "Attempt to read uint32 from option " << type_
+ << " that has size " << data_.size());
+ }
+ return ( readUint32(&data_[0]) );
+}
+
+void Option::setUint8(uint8_t value) {
+ data_.resize(1);
+ data_[0] = value;
+}
+
+void Option::setUint16(uint16_t value) {
+ data_.resize(2);
+ writeUint16(value, &data_[0]);
+}
+
+void Option::setUint32(uint32_t value) {
+ data_.resize(4);
+ writeUint32(value, &data_[0]);
+}
+
+void Option::setData(const OptionBufferConstIter first,
+ const OptionBufferConstIter last) {
+ // We will copy entire option buffer, so we have to resize data_.
+ data_.resize(std::distance(first, last));
+ std::copy(first, last, data_.begin());
+}
+
+
Option::~Option() {
}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
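
The new fixed-width accessors above (getUint8/16/32(), setUint8/16/32()) replace direct pointer access into a shared buffer. A short sketch of their behavior; the option code is an arbitrary example:

    #include <dhcp/option.h>

    using namespace isc::dhcp;

    void accessor_example() {
        OptionPtr opt(new Option(Option::V6, 13));   // option code is arbitrary here
        opt->setUint16(0);                           // data_ resized to 2 bytes
        uint16_t value = opt->getUint16();           // reads the same 2 bytes back
        // opt->getUint32() would throw OutOfRange: only 2 bytes are stored.
        (void) value;
    }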
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index 5caacdb..4354cb8 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -18,12 +18,32 @@
#include <stdint.h>
#include <string>
#include <map>
+#include <vector>
#include <boost/shared_ptr.hpp>
-#include <boost/shared_array.hpp>
+#include <util/buffer.h>
namespace isc {
namespace dhcp {
+/// @brief buffer types used in DHCP code.
+///
+/// Dereferencing an OptionBuffer iterator gives access to contiguous memory.
+typedef std::vector<uint8_t> OptionBuffer;
+
+/// iterator for walking over OptionBuffer
+typedef OptionBuffer::iterator OptionBufferIter;
+
+/// const_iterator for walking over OptionBuffer
+typedef OptionBuffer::const_iterator OptionBufferConstIter;
+
+/// pointer to a DHCP buffer
+typedef boost::shared_ptr<OptionBuffer> OptionBufferPtr;
+
+/// shared pointer to Option object
+class Option;
+typedef boost::shared_ptr<Option> OptionPtr;
+
+
class Option {
public:
/// length of the usual DHCPv4 option header (there are exceptions)
@@ -35,124 +55,125 @@ public:
/// defines option universe DHCPv4 or DHCPv6
enum Universe { V4, V6 };
- /// a collection of DHCPv4 options
- typedef std::map<unsigned int, boost::shared_ptr<Option> >
- Option4Collection;
-
/// a collection of DHCPv6 options
- typedef std::multimap<unsigned int, boost::shared_ptr<Option> >
- Option6Collection;
+ typedef std::multimap<unsigned int, OptionPtr> OptionCollection;
/// @brief a factory function prototype
///
/// @param u option universe (DHCPv4 or DHCPv6)
/// @param type option type
/// @param buf pointer to a buffer
- /// @param offset offset to first data byte in that buffer
- /// @param len data length of this option
///
/// @return a pointer to a created option object
- typedef boost::shared_ptr<Option> Factory(Option::Universe u,
- unsigned short type,
- boost::shared_array<uint8_t>& buf,
- unsigned int offset,
- unsigned int len);
+ typedef OptionPtr Factory(Option::Universe u, uint16_t type, const OptionBuffer& buf);
/// @brief ctor, used for options constructed, usually during transmission
///
/// @param u option universe (DHCPv4 or DHCPv6)
/// @param type option type
- Option(Universe u, unsigned short type);
+ Option(Universe u, uint16_t type);
- /// @brief ctor, used for received options
+ /// @brief Constructor, used for received options.
///
- /// boost::shared_array allows sharing a buffer, but it requires that
- /// different instances share pointer to the whole array, not point
- /// to different elements in shared array. Therefore we need to share
- /// pointer to the whole array and remember offset where data for
- /// this option begins
+ /// This constructor takes vector<uint8_t>& which is used in cases
+ /// when content of the option will be copied and stored within
+ /// option object. V4 Options follow that approach already.
+ /// TODO Migrate V6 options to that approach.
///
/// @param u specifies universe (V4 or V6)
- /// @param type option type
- /// @param buf pointer to a buffer
- /// @param offset offset in a buffer pointing to first byte of data
- /// @param len length of the option data
- Option(Universe u, unsigned short type,
- const boost::shared_array<uint8_t>& buf, unsigned int offset,
- unsigned int len);
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param data content of the option
+ Option(Universe u, uint16_t type, const OptionBuffer& data);
- /// @brief writes option in wire-format to buf
+ /// @brief Constructor, used for received options.
+ ///
+ /// This constructor is similar to the previous one, but instead of taking
+ /// the whole vector<uint8_t> it takes a subset of it.
+ ///
+ /// TODO: This can be templated to use different containers, not just
+ /// vector. Prototype should look like this:
+ /// template<typename InputIterator> Option(Universe u, uint16_t type,
+ /// InputIterator first, InputIterator last);
+ ///
+ /// Example usage (given vector<uint8_t> myData):
+ /// new Option(V4, 123, myData.begin()+1, myData.end()-1)
+ /// This will create DHCPv4 option of type 123 that contains data from
+ /// trimmed (first and last byte removed) myData vector.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param first iterator to the first element that should be copied
+ /// @param last iterator to the next element after the last one
+ /// to be copied.
+ Option(Universe u, uint16_t type, OptionBufferConstIter first,
+ OptionBufferConstIter last);
+
+ /// @brief returns option universe (V4 or V6)
+ ///
+ /// @return universe type
+ Universe getUniverse() { return universe_; };
+
+ /// @brief Writes option in wire-format to a buffer.
///
/// Writes option in wire-format to buffer, returns pointer to first unused
/// byte after stored option (that is useful for writing options one after
- /// another)
- ///
- /// @param buf pointer to a buffer
- /// @param buf_len length of the buffer
- /// @param offset offset to place, where option shout be stored
+ /// another). Used in DHCPv6 options.
///
- /// @return offset to first unused byte after stored option
+ /// TODO: Migrate DHCPv6 code to pack(OutputBuffer& buf) version
///
- virtual unsigned int
- pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset);
+ /// @param buf output buffer (option will be stored there)
+ virtual void pack(isc::util::OutputBuffer& buf);
- /// @brief Parses buffer.
+ /// @brief Writes option in a wire-format to a buffer.
+ ///
+ /// Method will throw if option storing fails for some reason.
///
- /// Parses received buffer, returns offset to the first unused byte after
- /// parsed option.
+ /// TODO Once old (DHCPv6) implementation is rewritten,
+ /// unify pack4() and pack6() and rename them to just pack().
///
- /// @param buf pointer to buffer
- /// @param buf_len length of buf
- /// @param offset offset, where start parsing option
- /// @param parse_len how many bytes should be parsed
+ /// @param buf output buffer (option will be stored there)
+ virtual void pack4(isc::util::OutputBuffer& buf);
+
+ /// @brief Parses received buffer.
///
- /// @return offset after last parsed octet
- virtual unsigned int
- unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ virtual void unpack(OptionBufferConstIter begin,
+ OptionBufferConstIter end);
/// Returns string representation of the option.
///
/// @param indent number of spaces before printing text
///
/// @return string with text representation.
- virtual std::string
- toText(int indent = 0);
+ virtual std::string toText(int indent = 0);
/// Returns option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
///
/// @return option type
- unsigned short
- getType();
+ uint16_t getType() { return (type_); }
/// Returns length of the complete option (data length + DHCPv4/DHCPv6
/// option header)
///
/// @return length of the option
- virtual unsigned short
- len();
+ virtual uint16_t len();
/// @brief Returns length of header (2 for v4, 4 for v6)
///
/// @return length of option header
- virtual unsigned short
- getHeaderLen();
+ virtual uint16_t getHeaderLen();
/// returns if option is valid (e.g. option may be truncated)
///
/// @return true, if option is valid
- virtual bool
- valid();
+ virtual bool valid();
/// Returns pointer to actual data.
///
- /// @return pointer to actual data (or NULL if there is no data)
- virtual uint8_t*
- getData();
+ /// @return pointer to actual data (or reference to an empty vector
+ /// if there is no data)
+ virtual const OptionBuffer& getData() { return (data_); }
/// Adds a sub-option.
///
@@ -166,103 +187,101 @@ public:
/// many places. Requiring casting is not feasible.
///
/// @param opt shared pointer to a suboption that is going to be added.
- void
- addOption(boost::shared_ptr<Option> opt);
+ void addOption(OptionPtr opt);
/// Returns shared_ptr to suboption of specific type
///
/// @param type type of requested suboption
///
+ /// @return shared_ptr to the requested suboption
- boost::shared_ptr<isc::dhcp::Option>
- getOption(unsigned short type);
+ OptionPtr getOption(uint16_t type);
/// Attempts to delete first suboption of requested type
///
/// @param type Type of option to be deleted.
///
/// @return true if option was deleted, false if no such option existed
- bool
- delOption(unsigned short type);
+ bool delOption(uint16_t type);
- /// just to force that every option has virtual dtor
- virtual
- ~Option();
+ /// @brief Returns content of first byte.
+ ///
+ /// @exception OutOfRange Thrown if the option has a length of 0.
+ ///
+ /// @return value of the first byte
+ uint8_t getUint8();
-protected:
+ /// @brief Returns content of first word.
+ ///
+ /// @exception OutOfRange Thrown if the option has a length less than 2.
+ ///
+ /// @return uint16_t value stored on first two bytes
+ uint16_t getUint16();
- /// Builds raw (over-wire) buffer of this option, including all
- /// defined suboptions. Version for building DHCPv4 options.
+ /// @brief Returns content of first double word.
///
- /// @param buf output buffer (built options will be stored here)
- /// @param buf_len buffer length (used for buffer overflow checks)
- /// @param offset offset from start of the buf buffer
+ /// @exception OutOfRange Thrown if the option has a length less than 4.
///
- /// @return offset to the next byte after last used byte
- virtual unsigned int
- pack4(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset);
+ /// @return uint32_t value stored on first four bytes
+ uint32_t getUint32();
- /// Builds raw (over-wire) buffer of this option, including all
- /// defined suboptions. Version for building DHCPv4 options.
+ /// @brief Sets content of this option to a single uint8 value.
///
- /// @param buf output buffer (built options will be stored here)
- /// @param buf_len buffer length (used for buffer overflow checks)
- /// @param offset offset from start of the buf buffer
+ /// The option is resized appropriately (to a length of 1 octet).
+ ///
+ /// @param value value to be set
+ void setUint8(uint8_t value);
+
+ /// @brief Sets content of this option to a single uint16 value.
+ ///
+ /// The option is resized appropriately (to a length of 2 octets).
///
- /// @return offset to the next byte after last used byte
- virtual unsigned int
- pack6(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset);
+ /// @param value value to be set
+ void setUint16(uint16_t value);
- /// Parses provided buffer and creates DHCPv4 options.
+ /// @brief Sets content of this option to a single uint32 value.
///
- /// @param buf buffer that contains raw buffer to parse (on-wire format)
- /// @param buf_len buffer length (used for buffer overflow checks)
- /// @param offset offset from start of the buf buffer
+ /// The option is resized appropriately (to a length of 4 octets).
///
- /// @return offset to the next byte after last parsed byte
- virtual unsigned int
- unpack4(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len);
+ /// @param value value to be set
+ void setUint32(uint32_t value);
- /// Parses provided buffer and creates DHCPv6 options.
+ /// @brief Sets content of this option from buffer.
///
- /// @param buf buffer that contains raw buffer to parse (on-wire format)
- /// @param buf_len buffer length (used for buffer overflow checks)
- /// @param offset offset from start of the buf buffer
+ /// Option will be resized to length of buffer.
///
- /// @return offset to the next byte after last parsed byte
- virtual unsigned int
- unpack6(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len);
+ /// @param first iterator pointing to the beginning of the buffer to copy.
+ /// @param last iterator pointing to the end of the buffer to copy.
+ void setData(const OptionBufferConstIter first,
+ const OptionBufferConstIter last);
+
+ /// just to force that every option has virtual dtor
+ virtual ~Option();
+
+protected:
+ /// Builds raw (over-wire) buffer of this option, including all
+ /// defined suboptions. Version for building DHCPv6 options.
+ ///
+ /// @param buf output buffer (built options will be stored here)
+ virtual void pack6(isc::util::OutputBuffer& buf);
+
+ /// @brief A private method used to verify option correctness.
+ ///
+ /// It is used in constructors. If any problems are detected
+ /// (like specifying type > 255 for a DHCPv4 option), it will throw
+ /// a BadValue or OutOfRange exception.
+ void check();
/// option universe (V4 or V6)
Universe universe_;
/// option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
- unsigned short type_;
-
- /// shared pointer to a buffer (usually a part of packet)
- boost::shared_array<uint8_t> data_;
-
- /// length of data only. Use len() if you want to
- /// know proper length with option header overhead
- unsigned int data_len_;
+ uint16_t type_;
- /// data is a shared_pointer that points out to the
- /// whole packet. offset_ specifies where data for
- /// this option begins.
- unsigned int offset_;
+ /// contains content of this option
+ OptionBuffer data_;
/// collection for storing suboptions
- Option6Collection options_;
+ OptionCollection options_;
/// TODO: probably 2 different containers have to be used for v4 (unique
/// options) and v6 (options with the same type can repeat)
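
With Option4Collection and Option6Collection folded into a single OptionCollection multimap, suboption handling is shared between both universes; only addOption() differs, rejecting duplicate option types for DHCPv4 (see the check in option.cc above). A sketch with arbitrary option codes:

    #include <dhcp/option.h>

    using namespace isc::dhcp;

    void suboption_example() {
        OptionPtr parent(new Option(Option::V6, 3));           // container option
        parent->addOption(OptionPtr(new Option(Option::V6, 5)));
        OptionPtr sub = parent->getOption(5);                   // finds the suboption
        parent->delOption(5);                                   // removes it again

        OptionPtr v4opt(new Option(Option::V4, 53));
        v4opt->addOption(OptionPtr(new Option(Option::V4, 61)));
        // A second addOption() with type 61 would throw BadValue here,
        // because DHCPv4 suboptions must be unique.
        (void) sub;
    }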
diff --git a/src/lib/dhcp/option4_addrlst.cc b/src/lib/dhcp/option4_addrlst.cc
new file mode 100644
index 0000000..9e3f7f4
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.cc
@@ -0,0 +1,143 @@
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#else
+#include <arpa/inet.h>
+#endif
+#include <sstream>
+#include <iomanip>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <util/io_utilities.h>
+#include <dhcp/option4_addrlst.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::asiolink;
+
+namespace isc {
+namespace dhcp {
+
+Option4AddrLst::Option4AddrLst(uint8_t type)
+ :Option(V4, type) {
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const AddressContainer& addrs)
+ :Option(V4, type) {
+ setAddresses(addrs);
+ // don't set addrs_ directly. setAddresses() will do additional checks.
+}
+
+
+Option4AddrLst::Option4AddrLst(uint8_t type, OptionBufferConstIter first,
+ OptionBufferConstIter last)
+ :Option(V4, type) {
+ if ( (distance(first, last) % V4ADDRESS_LEN) ) {
+ isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_
+ << " has invalid length=" << distance(first, last)
+ << ", must be divisible by 4.");
+ }
+
+ while (first != last) {
+ const uint8_t* ptr = &(*first);
+ addAddress(IOAddress(readUint32(ptr)));
+ first += V4ADDRESS_LEN;
+ }
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const IOAddress& addr)
+ :Option(V4, type) {
+ setAddress(addr);
+}
+
+void
+Option4AddrLst::pack4(isc::util::OutputBuffer& buf) {
+
+ if (addrs_.size() * V4ADDRESS_LEN > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_ << " is too big. "
+ << "At most 255 bytes are supported.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+
+ buf.writeUint8(type_);
+ buf.writeUint8(len() - getHeaderLen());
+
+ AddressContainer::const_iterator addr = addrs_.begin();
+
+ while (addr != addrs_.end()) {
+ buf.writeUint32(*addr);
+ ++addr;
+ }
+}
+
+void Option4AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+ if (addr.getFamily() != AF_INET) {
+ isc_throw(BadValue, "Can't store non-IPv4 address in "
+ << "Option4AddrLst option");
+ }
+ addrs_.clear();
+ addAddress(addr);
+}
+
+void Option4AddrLst::setAddresses(const AddressContainer& addrs) {
+
+ // Do not copy it as a whole. addAddress() does sanity checks.
+ // i.e. throw if someone tries to set IPv6 address.
+ addrs_.clear();
+ for (AddressContainer::const_iterator addr = addrs.begin();
+ addr != addrs.end(); ++addr) {
+ addAddress(*addr);
+ }
+}
+
+
+void Option4AddrLst::addAddress(const isc::asiolink::IOAddress& addr) {
+ if (addr.getFamily() != AF_INET) {
+ isc_throw(BadValue, "Can't store non-IPv4 address in "
+ << "Option4AddrLst option");
+ }
+ addrs_.push_back(addr);
+}
+
+uint16_t Option4AddrLst::len() {
+
+ // Returns length of the complete option (option header + data length)
+ return (getHeaderLen() + addrs_.size() * V4ADDRESS_LEN);
+}
+
+std::string Option4AddrLst::toText(int indent /* =0 */ ) {
+ std::stringstream tmp;
+
+ for (int i = 0; i < indent; i++) {
+ tmp << " ";
+ }
+
+ tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ":";
+
+ for (AddressContainer::const_iterator addr = addrs_.begin();
+ addr != addrs_.end(); ++addr) {
+ tmp << " " << (*addr);
+ }
+
+ return tmp.str();
+}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
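
A minimal sketch of building a DHCPv4 address-list option with the new class; the option code 6 (domain name servers) and the RFC 5737 documentation addresses are examples, not taken from this commit:

    #include <dhcp/option4_addrlst.h>
    #include <asiolink/io_address.h>
    #include <util/buffer.h>

    using namespace isc::dhcp;
    using isc::asiolink::IOAddress;

    void dns_servers_example() {
        Option4AddrLst dns(6);                     // option 6 = domain name servers
        dns.addAddress(IOAddress("192.0.2.1"));
        dns.addAddress(IOAddress("192.0.2.2"));

        isc::util::OutputBuffer buf(64);
        dns.pack4(buf);    // writes code, length (8), then the two IPv4 addresses
        // dns.len() == 2 (header) + 2 * 4 (addresses) == 10
    }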
diff --git a/src/lib/dhcp/option4_addrlst.h b/src/lib/dhcp/option4_addrlst.h
new file mode 100644
index 0000000..3bedc6d
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.h
@@ -0,0 +1,164 @@
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION4_ADDRLST_H_
+#define OPTION4_ADDRLST_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <dhcp/option.h>
+
+namespace isc {
+namespace dhcp {
+
+
+/// @brief DHCPv4 Option class for handling list of IPv4 addresses.
+///
+/// This class handles a list of IPv4 addresses. An example of such an option
+/// is the dns-servers option. It can also be used to handle a single address.
+class Option4AddrLst : public isc::dhcp::Option {
+public:
+
+ /// Defines a collection of IPv4 addresses.
+ typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+ /// @brief Constructor, creates an option with empty list of addresses.
+ ///
+ /// Creates empty option that can hold addresses. Addresses can be added
+ /// with addAddress(), setAddress() or setAddresses().
+ ///
+ /// @param type option type
+ Option4AddrLst(uint8_t type);
+
+ /// @brief Constructor, creates an option with a list of addresses.
+ ///
+ /// Creates an option that contains specified list of IPv4 addresses.
+ ///
+ /// @param type option type
+ /// @param addrs container with a list of addresses
+ Option4AddrLst(uint8_t type, const AddressContainer& addrs);
+
+ /// @brief Constructor, creates an option with a single address.
+ ///
+ /// Creates an option that contains a single address.
+ ///
+ /// @param type option type
+ /// @param addr a single address that will be stored as 1-elem. address list
+ Option4AddrLst(uint8_t type, const isc::asiolink::IOAddress& addr);
+
+ /// @brief Constructor, used for received options.
+ ///
+ /// TODO: This can be templated to use different containers, not just
+ /// vector. Prototype should look like this:
+ /// template<typename InputIterator> Option(Universe u, uint16_t type,
+ /// InputIterator first, InputIterator last);
+ ///
+ /// Example usage (given vector<uint8_t> myData):
+ /// new Option4AddrLst(123, myData.begin()+1, myData.end()-1)
+ /// This will create DHCPv4 option of type 123 that contains data from
+ /// trimmed (first and last byte removed) myData vector.
+ ///
+ /// @param type option type (0-255)
+ /// @param first iterator to the first element that should be copied
+ /// @param last iterator to the next element after the last one
+ /// to be copied.
+ Option4AddrLst(uint8_t type, OptionBufferConstIter first,
+ OptionBufferConstIter last);
+
+ /// @brief Writes option in a wire-format to a buffer.
+ ///
+ /// Method will throw if option storing fails for some reason.
+ ///
+ /// TODO Once old (DHCPv6) implementation is rewritten,
+ /// unify pack4() and pack6() and rename them to just pack().
+ ///
+ /// @param buf output buffer (option will be stored there)
+ virtual void pack4(isc::util::OutputBuffer& buf);
+
+ /// Returns string representation of the option.
+ ///
+ /// @param indent number of spaces before printing text
+ ///
+ /// @return string with text representation.
+ virtual std::string toText(int indent = 0);
+
+ /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+ /// option header)
+ ///
+ /// @return length of the option
+ virtual uint16_t len();
+
+ /// @brief Returns vector with addresses.
+ ///
+ /// We return a copy of our list. Although this includes overhead,
+ /// it also makes this list safe to use after this option object
+ /// is no longer available. As options are expected to hold only
+ /// a couple (1-3) addresses, the overhead is not that big.
+ ///
+ /// @return address container with addresses
+ AddressContainer getAddresses() { return addrs_; };
+
+ /// @brief Sets addresses list.
+ ///
+ /// Clears the existing list of addresses and replaces it with the
+ /// provided collection of addresses. See addAddress() if you want to
+ /// add a single address to the existing list, or setAddress() if you
+ /// want the option to hold just one address.
+ ///
+ /// Passed addresses must be IPv4 addresses. Otherwise a BadValue
+ /// exception will be thrown.
+ ///
+ /// @param addrs address collection to be set
+ void setAddresses(const AddressContainer& addrs);
+
+ /// @brief Clears address list and sets a single address.
+ ///
+ /// Clears the existing list of addresses and adds a single address to
+ /// that list. This is a very convenient method for options that are
+ /// supposed to carry only a single address. See addAddress() if you want
+ /// to add an address to the existing list or setAddresses() if you want
+ /// to set the whole list at once.
+ ///
+ /// Passed address must be an IPv4 address. Otherwise a BadValue
+ /// exception will be thrown.
+ ///
+ /// @param addr an address that is going to be set as 1-element address list
+ void setAddress(const isc::asiolink::IOAddress& addr);
+
+ /// @brief Adds address to existing list of addresses.
+ ///
+ /// Adds a single address to that list. See setAddress() if you want to
+ /// define only a single address or setAddresses() if you want to
+ /// set the whole list at once.
+ ///
+ /// Passed address must be an IPv4 address. Otherwise a BadValue
+ /// exception will be thrown.
+ ///
+ /// @param addr an address that is going to be added to the existing list
+ void addAddress(const isc::asiolink::IOAddress& addr);
+
+protected:
+ /// contains list of addresses
+ AddressContainer addrs_;
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
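
A short sketch of the setter semantics described above (setAddress() stores exactly one address, setAddresses() replaces the whole list, addAddress() appends); the option code and the addresses are illustrative:

    #include <dhcp/option4_addrlst.h>
    #include <asiolink/io_address.h>

    using namespace isc::dhcp;
    using isc::asiolink::IOAddress;

    void setter_example() {
        Option4AddrLst routers(3);                      // option 3 = routers
        routers.setAddress(IOAddress("192.0.2.254"));   // list now holds one address

        Option4AddrLst::AddressContainer list;
        list.push_back(IOAddress("192.0.2.254"));
        list.push_back(IOAddress("198.51.100.1"));
        routers.setAddresses(list);                     // replaces the whole list

        // routers.addAddress(IOAddress("2001:db8::1")) would throw BadValue:
        // only IPv4 addresses are accepted.
    }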
diff --git a/src/lib/dhcp/option6_addrlst.cc b/src/lib/dhcp/option6_addrlst.cc
index 1ab297e..ff2eabc 100644
--- a/src/lib/dhcp/option6_addrlst.cc
+++ b/src/lib/dhcp/option6_addrlst.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -23,7 +23,7 @@
#include "asiolink/io_address.h"
#include "util/io_utilities.h"
-#include "dhcp/libdhcp.h"
+#include "dhcp/libdhcp++.h"
#include "dhcp/option6_addrlst.h"
#include "dhcp/dhcp6.h"
@@ -33,27 +33,29 @@ using namespace isc::dhcp;
using namespace isc::asiolink;
using namespace isc::util;
-Option6AddrLst::Option6AddrLst(unsigned short type,
- const AddressContainer& addrs)
+namespace isc {
+namespace dhcp {
+
+Option6AddrLst::Option6AddrLst(uint16_t type, const AddressContainer& addrs)
:Option(V6, type), addrs_(addrs) {
}
-Option6AddrLst::Option6AddrLst(unsigned short type,
- const isc::asiolink::IOAddress& addr)
+Option6AddrLst::Option6AddrLst(uint16_t type, const isc::asiolink::IOAddress& addr)
:Option(V6, type), addrs_(1,addr) {
}
-Option6AddrLst::Option6AddrLst(unsigned short type,
- boost::shared_array<uint8_t> buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int option_len)
+Option6AddrLst::Option6AddrLst(uint16_t type, OptionBufferConstIter begin,
+ OptionBufferConstIter end)
:Option(V6, type) {
- unpack(buf, buf_len, offset, option_len);
+ unpack(begin, end);
}
void
Option6AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+ if (addr.getFamily() != AF_INET6) {
+ isc_throw(BadValue, "Can't store non-IPv6 address in Option6AddrLst option");
+ }
+
addrs_.clear();
addrs_.push_back(addr);
}
@@ -63,76 +65,51 @@ Option6AddrLst::setAddresses(const AddressContainer& addrs) {
addrs_ = addrs;
}
-unsigned int
-Option6AddrLst::pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- if (len() > buf_len) {
- isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
- << ", buffer=" << buf_len << ": too small buffer.");
- }
+void Option6AddrLst::pack(isc::util::OutputBuffer& buf) {
- writeUint16(type_, &buf[offset]);
- offset += sizeof(uint16_t);
+ buf.writeUint16(type_);
// len() returns complete option length.
// len field contains length without 4-byte option header
- writeUint16(len() - OPTION6_HDR_LEN, &buf[offset]);
- offset += sizeof(uint16_t);
+ buf.writeUint16(len() - getHeaderLen());
- // this wrapping is *ugly*. I wish there was a a
for (AddressContainer::const_iterator addr=addrs_.begin();
- addr!=addrs_.end();
- ++addr) {
- memcpy(&buf[offset],
- addr->getAddress().to_v6().to_bytes().data(),
- V6ADDRESS_LEN);
- offset += V6ADDRESS_LEN;
+ addr!=addrs_.end(); ++addr) {
+ buf.writeData(addr->getAddress().to_v6().to_bytes().data(), V6ADDRESS_LEN);
}
-
- return offset;
}
-unsigned int
-Option6AddrLst::unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int option_len) {
- if (offset+option_len > buf_len) {
+void Option6AddrLst::unpack(OptionBufferConstIter begin,
+ OptionBufferConstIter end) {
+ if ((distance(begin, end) % V6ADDRESS_LEN) != 0) {
isc_throw(OutOfRange, "Option " << type_
- << " truncated.");
- }
-
- if (option_len%16) {
- isc_throw(OutOfRange, "Option " << type_
- << " malformed: len=" << option_len
+ << " malformed: len=" << distance(begin, end)
<< " is not divisible by 16.");
}
- while (option_len > 0) {
- addrs_.push_back(IOAddress::from_bytes(AF_INET6, &buf[offset]));
- offset += 16;
- option_len -= 16;
+ while (begin != end) {
+ addrs_.push_back(IOAddress::from_bytes(AF_INET6, &(*begin)));
+ begin += V6ADDRESS_LEN;
}
-
- return offset;
}
std::string Option6AddrLst::toText(int indent /* =0 */) {
stringstream tmp;
- for (int i=0; i<indent; i++)
+ for (int i = 0; i < indent; i++)
tmp << " ";
tmp << "type=" << type_ << " " << addrs_.size() << "addr(s): ";
for (AddressContainer::const_iterator addr=addrs_.begin();
- addr!=addrs_.end();
- ++addr) {
+ addr!=addrs_.end(); ++addr) {
tmp << addr->toText() << " ";
}
return tmp.str();
}
-unsigned short Option6AddrLst::len() {
+uint16_t Option6AddrLst::len() {
- return (OPTION6_HDR_LEN + addrs_.size()*16);
+ return (OPTION6_HDR_LEN + addrs_.size()*V6ADDRESS_LEN);
}
+
+} // end of namespace isc::dhcp
+} // end of namespace isc
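
The iterator-based unpack() path can be sketched as follows; the buffer encodes a single documentation-prefix IPv6 address, and the option code 23 (DNS recursive name servers) is just an example:

    #include <dhcp/option6_addrlst.h>

    using namespace isc::dhcp;

    void v6_addrlst_example() {
        // 16 bytes = one IPv6 address (2001:db8::1), option header already stripped.
        const uint8_t raw[] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0x01 };
        OptionBuffer data(raw, raw + sizeof(raw));

        Option6AddrLst servers(23, data.begin(), data.end());
        // servers.getAddresses().size() == 1; a length that is not divisible
        // by 16 would make the constructor throw OutOfRange.
    }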
diff --git a/src/lib/dhcp/option6_addrlst.h b/src/lib/dhcp/option6_addrlst.h
index c5b32af..209d2dd 100644
--- a/src/lib/dhcp/option6_addrlst.h
+++ b/src/lib/dhcp/option6_addrlst.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -16,17 +16,16 @@
#define OPTION6_ADDRLST_H_
#include <vector>
-#include "asiolink/io_address.h"
-#include "dhcp/option.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
namespace isc {
namespace dhcp {
-/// @brief Option class for handling list of IPv6 addresses.
+/// @brief DHCPv6 Option class for handling list of IPv6 addresses.
///
/// This class handles a list of IPv6 addresses. An example of such option
/// is dns-servers option. It can also be used to handle single address.
-///
class Option6AddrLst: public Option {
public:
@@ -37,85 +36,58 @@ public:
///
/// @param type option type
/// @param addrs vector of addresses to be stored
- ///
- Option6AddrLst(unsigned short type,
- const AddressContainer& addrs);
+ Option6AddrLst(uint16_t type, const AddressContainer& addrs);
/// @brief Simplified constructor for a single address
///
/// @param type option type
/// @param addr a single address to be stored
- ///
- Option6AddrLst(unsigned short type,
- const isc::asiolink::IOAddress& addr);
+ Option6AddrLst(uint16_t type, const isc::asiolink::IOAddress& addr);
/// @brief Constructor used for parsing received option
///
/// @param type option type
- /// @param buf pointer to packet buffer
- /// @param buf_len length of packet buffer
- /// @param offset offset to beginning of option data
- /// @param len length of option data
- ///
- Option6AddrLst(unsigned short type, boost::shared_array<uint8_t> buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ Option6AddrLst(uint16_t type, OptionBufferConstIter begin,
+ OptionBufferConstIter end);
/// @brief Assembles on-wire form of this option
///
/// @param buf pointer to packet buffer
- /// @param buf_len length of packet buffer
- /// @param offset offset to place, where option is to be stored
- ///
- /// @return offset to the next unused char (just after stored option)
- ///
- unsigned int
- pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
- unsigned int offset);
+ void pack(isc::util::OutputBuffer& buf);
/// @brief Parses received data
///
- /// @param buf pointer to packet buffer
- /// @param buf_len length of packet buffer
- /// @param offset offset to option data
- /// @param parse_len specified option data length
- ///
- /// @return offset to the next unparsed char (just after parsed option)
- ///
- virtual unsigned int
- unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ virtual void unpack(OptionBufferConstIter begin,
+ OptionBufferConstIter end);
virtual std::string toText(int indent = 0);
/// @brief Sets a single address.
///
/// @param addr a single address to be added
- ///
void setAddress(const isc::asiolink::IOAddress& addr);
/// @brief Sets list of addresses.
///
/// @param addrs a vector of addresses to be added
- ///
void setAddresses(const AddressContainer& addrs);
/// @brief Returns vector with addresses.
///
- /// As user may want to use/modify this list, it is better to return
- /// a copy rather than const reference to the original. This is
- /// usually one or two addresses long, so it is not a big deal.
- ///
- /// @return vector with addresses
+ /// We return a copy of our list. Although this includes overhead,
+ /// it also makes this list safe to use after this option object
+ /// is no longer available. As options are expected to hold only
+ /// a couple (1-3) addresses, the overhead is not that big.
///
- AddressContainer
- getAddresses() { return addrs_; };
+ /// @return address container with addresses
+ AddressContainer getAddresses() { return addrs_; };
// returns data length (data length + DHCPv4/DHCPv6 option header)
- virtual unsigned short len();
+ virtual uint16_t len();
protected:
AddressContainer addrs_;
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
index a3a8a36..d230ea7 100644
--- a/src/lib/dhcp/option6_ia.cc
+++ b/src/lib/dhcp/option6_ia.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -19,82 +19,54 @@
#include <arpa/inet.h>
#endif
#include <sstream>
-#include "exceptions/exceptions.h"
-#include "dhcp/libdhcp.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/dhcp6.h"
-#include "util/io_utilities.h"
+#include <exceptions/exceptions.h>
+#include <dhcp/libdhcp++.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/dhcp6.h>
+#include <util/io_utilities.h>
using namespace std;
-using namespace isc;
-using namespace isc::dhcp;
using namespace isc::util;
-Option6IA::Option6IA(unsigned short type, unsigned int iaid)
+namespace isc {
+namespace dhcp {
+
+Option6IA::Option6IA(uint16_t type, uint32_t iaid)
:Option(Option::V6, type), iaid_(iaid) {
}
-Option6IA::Option6IA(unsigned short type,
- const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int option_len)
+Option6IA::Option6IA(uint16_t type, OptionBufferConstIter begin, OptionBufferConstIter end)
:Option(Option::V6, type) {
- unpack(buf, buf_len, offset, option_len);
+ unpack(begin, end);
}
-unsigned int
-Option6IA::pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- if (offset + len() > buf_len) {
- isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
- << ", buffer=" << buf_len << ": too small buffer.");
- }
-
- if (len() < 16 ) {
- isc_throw(OutOfRange, "Attempt to build malformed IA option: len="
- << len() << " is too small (at least 16 is required).");
- }
-
- uint8_t* ptr = &buf[offset];
+void Option6IA::pack(isc::util::OutputBuffer& buf) {
+ buf.writeUint16(type_);
+ buf.writeUint16(len() - OPTION6_HDR_LEN);
+ buf.writeUint32(iaid_);
+ buf.writeUint32(t1_);
+ buf.writeUint32(t2_);
- ptr = writeUint16(type_, ptr);
- ptr = writeUint16(len() - OPTION6_HDR_LEN, ptr);
- offset += OPTION6_HDR_LEN;
-
- ptr = writeUint32(iaid_, ptr);
- ptr = writeUint32(t1_, ptr);
- ptr = writeUint32(t2_, ptr);
- offset += OPTION6_IA_LEN;
-
- offset = LibDHCP::packOptions6(buf, buf_len, offset, options_);
- return offset;
+ LibDHCP::packOptions6(buf, options_);
}
-unsigned int
-Option6IA::unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len) {
- if ( parse_len < OPTION6_IA_LEN || offset + OPTION6_IA_LEN > buf_len) {
+void Option6IA::unpack(OptionBufferConstIter begin,
+ OptionBufferConstIter end) {
+ // IA_NA and IA_PD have 12 bytes content (iaid, t1, t2 fields)
+ // followed by 0 or more sub-options.
+ if (distance(begin, end) < OPTION6_IA_LEN) {
isc_throw(OutOfRange, "Option " << type_ << " truncated");
}
-
- iaid_ = readUint32(&buf[offset]);
- offset += sizeof(uint32_t);
-
- t1_ = readUint32(&buf[offset]);
- offset += sizeof(uint32_t);
+ iaid_ = readUint32( &(*begin) );
+ begin += sizeof(uint32_t);
+ t1_ = readUint32( &(*begin) );
+ begin += sizeof(uint32_t);
- t2_ = readUint32(&buf[offset]);
- offset += sizeof(uint32_t);
+ t2_ = readUint32( &(*begin) );
+ begin += sizeof(uint32_t);
- offset = LibDHCP::unpackOptions6(buf, buf_len, offset,
- parse_len - OPTION6_IA_LEN, options_);
-
- return (offset);
+ LibDHCP::unpackOptions6(OptionBuffer(begin, end), options_);
}
std::string Option6IA::toText(int indent /* = 0*/) {
@@ -117,7 +89,7 @@ std::string Option6IA::toText(int indent /* = 0*/) {
tmp << " iaid=" << iaid_ << ", t1=" << t1_ << ", t2=" << t2_
<< " " << options_.size() << " sub-options:" << endl;
- for (Option6Collection::const_iterator opt=options_.begin();
+ for (OptionCollection::const_iterator opt=options_.begin();
opt!=options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
@@ -125,16 +97,19 @@ std::string Option6IA::toText(int indent /* = 0*/) {
return tmp.str();
}
-unsigned short Option6IA::len() {
+uint16_t Option6IA::len() {
- unsigned short length = OPTION6_HDR_LEN /*header (4)*/ +
+ uint16_t length = OPTION6_HDR_LEN /*header (4)*/ +
OPTION6_IA_LEN /* option content (12) */;
// length of all suboptions
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
}
return (length);
}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
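
A sketch of assembling an IA_NA with the reworked class; the IAID, timers, lifetimes, and address are example values:

    #include <dhcp/dhcp6.h>
    #include <dhcp/option6_ia.h>
    #include <dhcp/option6_iaaddr.h>
    #include <asiolink/io_address.h>
    #include <util/buffer.h>

    using namespace isc::dhcp;
    using isc::asiolink::IOAddress;

    void ia_na_example() {
        Option6IA ia(D6O_IA_NA, 1234);       // IAID chosen by the client
        ia.setT1(3600);
        ia.setT2(5400);

        ia.addOption(OptionPtr(new Option6IAAddr(D6O_IAADDR,
                                                 IOAddress("2001:db8::1"),
                                                 7200, 10800)));  // preferred, valid

        isc::util::OutputBuffer wire(128);
        ia.pack(wire);   // 4-byte header, 12-byte IA content, then the suboption
    }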
diff --git a/src/lib/dhcp/option6_ia.h b/src/lib/dhcp/option6_ia.h
index 516b2fc..c2089d4 100644
--- a/src/lib/dhcp/option6_ia.h
+++ b/src/lib/dhcp/option6_ia.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -27,54 +27,34 @@ public:
/// Length of IA_NA and IA_PD content
const static size_t OPTION6_IA_LEN = 12;
- /// @brief ctor, used for options constructed, usually during transmission
+ /// @brief Ctor, used for constructed options, usually during transmission.
///
/// @param type option type (usually 4 for IA_NA, 25 for IA_PD)
/// @param iaid identity association identifier (id of IA)
- Option6IA(uint16_t type, unsigned int iaid);
+ Option6IA(uint16_t type, uint32_t iaid);
- /// @brief ctor, used for received options
- ///
- /// boost::shared_array allows sharing a buffer, but it requires that
- /// different instances share pointer to the whole array, not point
- /// to different elements in shared array. Therefore we need to share
- /// pointer to the whole array and remember offset where data for
- /// this option begins
+ /// @brief Ctor, used for received options.
///
/// @param type option type (usually 4 for IA_NA, 25 for IA_PD)
- /// @param buf buffer to be parsed
- /// @param buf_len buffer length
- /// @param offset offset in buffer
- /// @param len number of bytes to parse
- Option6IA(uint16_t type, const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len, unsigned int offset, unsigned int len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ Option6IA(uint16_t type, OptionBuffer::const_iterator begin,
+ OptionBuffer::const_iterator end);
/// Writes option in wire-format to buf, returns pointer to first unused
/// byte after stored option.
///
/// @param buf buffer (option will be stored here)
- /// @param buf_len (buffer length)
- /// @param offset offset place where option should be stored
- ///
- /// @return offset to the first unused byte after stored option
- unsigned int
- pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
- unsigned int offset);
+ void pack(isc::util::OutputBuffer& buf);
/// @brief Parses received buffer
///
/// Parses received buffer and returns offset to the first unused byte after
/// parsed option.
///
- /// @param buf pointer to buffer
- /// @param buf_len length of buf
- /// @param offset offset, where start parsing option
- /// @param parse_len how many bytes should be parsed
- ///
- /// @return offset after last parsed octet
- virtual unsigned int
- unpack(const boost::shared_array<uint8_t>& buf, unsigned int buf_len,
- unsigned int offset, unsigned int parse_len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ virtual void unpack(OptionBufferConstIter begin, OptionBufferConstIter end);
/// Provides human readable text representation
///
@@ -87,48 +67,46 @@ public:
/// Sets T1 timer.
///
/// @param t1 t1 value to be set
- void setT1(unsigned int t1) { t1_=t1; }
-
+ void setT1(uint32_t t1) { t1_=t1; }
/// Sets T2 timer.
///
/// @param t2 t2 value to be set
- void setT2(unsigned int t2) { t2_=t2; }
+ void setT2(uint32_t t2) { t2_=t2; }
/// Returns IA identifier.
///
/// @return IAID value.
///
- unsigned int getIAID() const { return iaid_; }
+ uint32_t getIAID() const { return iaid_; }
/// Returns T1 timer.
///
/// @return T1 value.
- unsigned int getT1() const { return t1_; }
+ uint32_t getT1() const { return t1_; }
/// Returns T2 timer.
///
/// @return T2 value.
- unsigned int getT2() const { return t2_; }
+ uint32_t getT2() const { return t2_; }
/// @brief returns complete length of option
///
/// Returns length of this option, including option header and suboptions
///
/// @return length of this option
- virtual unsigned short
- len();
+ virtual uint16_t len();
protected:
/// keeps IA identifier
- unsigned int iaid_;
+ uint32_t iaid_;
/// keeps T1 timer value
- unsigned int t1_;
+ uint32_t t1_;
/// keeps T2 timer value
- unsigned int t2_;
+ uint32_t t2_;
};
} // isc::dhcp namespace
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
index ce2e64a..e6fc163 100644
--- a/src/lib/dhcp/option6_iaaddr.cc
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -21,86 +21,66 @@
#include <sstream>
#include "exceptions/exceptions.h"
-#include "dhcp/libdhcp.h"
+#include "dhcp/libdhcp++.h"
#include "dhcp/option6_iaaddr.h"
#include "dhcp/dhcp6.h"
#include "asiolink/io_address.h"
#include "util/io_utilities.h"
using namespace std;
-using namespace isc;
-using namespace isc::dhcp;
using namespace isc::asiolink;
using namespace isc::util;
-Option6IAAddr::Option6IAAddr(unsigned short type,
- const isc::asiolink::IOAddress& addr,
- unsigned int pref, unsigned int valid)
+namespace isc {
+namespace dhcp {
+
+Option6IAAddr::Option6IAAddr(uint16_t type, const isc::asiolink::IOAddress& addr,
+ uint32_t pref, uint32_t valid)
:Option(V6, type), addr_(addr), preferred_(pref),
valid_(valid) {
}
-Option6IAAddr::Option6IAAddr(unsigned short type,
- boost::shared_array<uint8_t> buf,
- unsigned int buf_len, unsigned int offset,
- unsigned int option_len)
+Option6IAAddr::Option6IAAddr(uint32_t type, OptionBuffer::const_iterator begin,
+ OptionBuffer::const_iterator end)
:Option(V6, type), addr_("::") {
- unpack(buf, buf_len, offset, option_len);
+ unpack(begin, end);
}
-unsigned int
-Option6IAAddr::pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- if (len() > buf_len) {
- isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
- << ", buffer=" << buf_len << ": too small buffer.");
- }
-
- uint8_t* ptr = &buf[offset];
+void Option6IAAddr::pack(isc::util::OutputBuffer& buf) {
- ptr = writeUint16(type_, ptr);
+ buf.writeUint16(type_);
// len() returns complete option length. len field contains
// length without 4-byte option header
- ptr = writeUint16(len() - OPTION6_HDR_LEN, ptr);
- offset += OPTION6_HDR_LEN;
+ buf.writeUint16(len() - getHeaderLen());
- memcpy(ptr, addr_.getAddress().to_v6().to_bytes().data(), 16);
- ptr += V6ADDRESS_LEN;
- ptr = writeUint32(preferred_, ptr);
+ buf.writeData(addr_.getAddress().to_v6().to_bytes().data(),
+ isc::asiolink::V6ADDRESS_LEN);
- ptr = writeUint32(valid_, ptr);
- offset += OPTION6_IAADDR_LEN;
+ buf.writeUint32(preferred_);
+ buf.writeUint32(valid_);
- // parse suboption (there shouldn't be any)
- offset = LibDHCP::packOptions6(buf, buf_len, offset, options_);
- return offset;
+ // pack suboptions (there shouldn't be any for IAADDR)
+ LibDHCP::packOptions6(buf, options_);
}
-unsigned int
-Option6IAAddr::unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len) {
- if ( parse_len < OPTION6_IAADDR_LEN || offset + OPTION6_IAADDR_LEN > buf_len) {
+void Option6IAAddr::unpack(OptionBuffer::const_iterator begin,
+ OptionBuffer::const_iterator end) {
+ if ( distance(begin, end) < OPTION6_IAADDR_LEN) {
isc_throw(OutOfRange, "Option " << type_ << " truncated");
}
// 16 bytes: IPv6 address
- addr_ = IOAddress::from_bytes(AF_INET6, &buf[offset]);
- offset += V6ADDRESS_LEN;
+ addr_ = IOAddress::from_bytes(AF_INET6, &(*begin));
+ begin += V6ADDRESS_LEN;
- preferred_ = readUint32(&buf[offset]);
- offset += sizeof(uint32_t);
+ preferred_ = readUint32( &(*begin) );
+ begin += sizeof(uint32_t);
- valid_ = readUint32(&buf[offset]);
- offset += sizeof(uint32_t);
- offset = LibDHCP::unpackOptions6(buf, buf_len, offset,
- parse_len - 24, options_);
-
- return offset;
+ valid_ = readUint32( &(*begin) );
+ begin += sizeof(uint32_t);
+ LibDHCP::unpackOptions6(OptionBuffer(begin, end), options_);
}
std::string Option6IAAddr::toText(int indent /* =0 */) {
@@ -112,7 +92,7 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
<< ", preferred-lft=" << preferred_ << ", valid-lft="
<< valid_ << endl;
- for (Option6Collection::const_iterator opt=options_.begin();
+ for (OptionCollection::const_iterator opt=options_.begin();
opt!=options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
@@ -120,17 +100,20 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
return tmp.str();
}
-unsigned short Option6IAAddr::len() {
+uint16_t Option6IAAddr::len() {
- unsigned short length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
+ uint16_t length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
// length of all suboptions
// TODO implement:
// protected: unsigned short Option::lenHelper(int header_size);
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
}
return (length);
}
+
+} // end of namespace isc::dhcp
+} // end of namespace isc
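As a quick illustration of the iterator-based parsing path above, a sketch that builds an Option6IAAddr from a raw 24-byte payload. It assumes OptionBuffer is the std::vector<uint8_t> typedef used elsewhere in this diff; the option code (5, IAADDR) and the buffer contents are made up, and only toText()/getValid() shown in this patch are used:

    #include <dhcp/option6_iaaddr.h>
    #include <stdint.h>
    #include <string>
    #include <vector>

    void unpack_iaaddr_example() {
        // 24 bytes: 16-byte IPv6 address, preferred and valid lifetimes.
        std::vector<uint8_t> body(24, 0);
        body[15] = 0x01;                    // address ::1
        body[19] = 0x64;                    // preferred-lft = 100
        body[23] = 0xc8;                    // valid-lft = 200

        isc::dhcp::Option6IAAddr addr(5 /* IAADDR */, body.begin(), body.end());

        std::string txt = addr.toText();    // human-readable dump
        uint32_t valid = addr.getValid();   // 200
        (void)txt;
        (void)valid;
    }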
diff --git a/src/lib/dhcp/option6_iaaddr.h b/src/lib/dhcp/option6_iaaddr.h
index 60c5c48..e6e2c16 100644
--- a/src/lib/dhcp/option6_iaaddr.h
+++ b/src/lib/dhcp/option6_iaaddr.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -27,28 +27,22 @@ public:
/// length of the fixed part of the IAADDR option
static const size_t OPTION6_IAADDR_LEN = 24;
- /// @brief ctor, used for options constructed (during transmission)
+ /// @brief Ctor, used for options constructed (during transmission).
///
/// @param type option type
/// @param addr reference to an address
/// @param preferred address preferred lifetime (in seconds)
/// @param valid address valid lifetime (in seconds)
- Option6IAAddr(unsigned short type, const isc::asiolink::IOAddress& addr,
- unsigned int preferred, unsigned int valid);
-
- /// ctor, used for received options
- /// boost::shared_array allows sharing a buffer, but it requires that
- /// different instances share pointer to the whole array, not point
- /// to different elements in shared array. Therefore we need to share
- /// pointer to the whole array and remember offset where data for
- /// this option begins
+ Option6IAAddr(uint16_t type, const isc::asiolink::IOAddress& addr,
+ uint32_t preferred, uint32_t valid);
+
+ /// @brief ctor, used for received options.
///
/// @param type option type
- /// @param buf pointer to a buffer
- /// @param offset offset to first data byte in that buffer
- /// @param len data length of this option
- Option6IAAddr(unsigned short type, boost::shared_array<uint8_t> buf,
- unsigned int buf_len, unsigned int offset, unsigned int len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ Option6IAAddr(uint32_t type, OptionBuffer::const_iterator begin,
+ OptionBuffer::const_iterator end);
/// @brief Writes option in wire-format.
///
@@ -56,30 +50,14 @@ public:
/// byte after stored option.
///
/// @param buf pointer to a buffer
- /// @param buf_len length of the buffer
- /// @param offset offset to place, where option shout be stored
- ///
- /// @return offset to first unused byte after stored option
- unsigned int
- pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
- unsigned int offset);
+ void pack(isc::util::OutputBuffer& buf);
- /// @brief Parses buffer.
- ///
- /// Parses received buffer, returns offset to the first unused byte after
- /// parsed option.
- ///
- /// @param buf pointer to buffer
- /// @param buf_len length of buf
- /// @param offset offset, where start parsing option
- /// @param parse_len how many bytes should be parsed
+ /// @brief Parses received buffer.
///
- /// @return offset after last parsed octet
- virtual unsigned int
- unpack(const boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset,
- unsigned int parse_len);
+ /// @param begin iterator to first byte of option data
+ /// @param end iterator to end of option data (first byte after option end)
+ virtual void unpack(OptionBufferConstIter begin,
+ OptionBufferConstIter end);
/// Returns string representation of the option.
///
@@ -126,8 +104,7 @@ public:
getValid() const { return valid_; }
/// returns data length (data length + DHCPv4/DHCPv6 option header)
- virtual unsigned short
- len();
+ virtual uint16_t len();
protected:
/// contains an IPv6 address
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index a9a5caa..2c3f1eb 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -12,9 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <config.h>
#include <dhcp/pkt4.h>
-#include <dhcp/libdhcp.h>
+#include <dhcp/libdhcp++.h>
#include <dhcp/dhcp4.h>
#include <exceptions/exceptions.h>
#include <asiolink/io_address.h>
@@ -26,10 +25,13 @@ using namespace isc::dhcp;
using namespace isc::asiolink;
namespace isc {
+namespace dhcp {
+
+const IOAddress DEFAULT_ADDRESS("0.0.0.0");
Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
- :local_addr_(IOAddress("0.0.0.0")),
- remote_addr_(IOAddress("0.0.0.0")),
+ :local_addr_(DEFAULT_ADDRESS),
+ remote_addr_(DEFAULT_ADDRESS),
iface_(""),
ifindex_(0),
local_port_(DHCP4_SERVER_PORT),
@@ -41,93 +43,200 @@ Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
transid_(transid),
secs_(0),
flags_(0),
- ciaddr_(IOAddress("0.0.0.0")),
- yiaddr_(IOAddress("0.0.0.0")),
- siaddr_(IOAddress("0.0.0.0")),
- giaddr_(IOAddress("0.0.0.0")),
- bufferIn_(0), // not used, this is TX packet
+ ciaddr_(DEFAULT_ADDRESS),
+ yiaddr_(DEFAULT_ADDRESS),
+ siaddr_(DEFAULT_ADDRESS),
+ giaddr_(DEFAULT_ADDRESS),
bufferOut_(DHCPV4_PKT_HDR_LEN),
msg_type_(msg_type)
{
- /// TODO: fixed fields, uncomment in ticket #1224
memset(chaddr_, 0, MAX_CHADDR_LEN);
memset(sname_, 0, MAX_SNAME_LEN);
memset(file_, 0, MAX_FILE_LEN);
}
Pkt4::Pkt4(const uint8_t* data, size_t len)
- :local_addr_(IOAddress("0.0.0.0")),
- remote_addr_(IOAddress("0.0.0.0")),
+ :local_addr_(DEFAULT_ADDRESS),
+ remote_addr_(DEFAULT_ADDRESS),
iface_(""),
- ifindex_(-1),
+ ifindex_(0),
local_port_(DHCP4_SERVER_PORT),
remote_port_(DHCP4_CLIENT_PORT),
- /// TODO Fixed fields, uncomment in ticket #1224
op_(BOOTREQUEST),
- transid_(transid_),
+ transid_(0),
secs_(0),
flags_(0),
- ciaddr_(IOAddress("0.0.0.0")),
- yiaddr_(IOAddress("0.0.0.0")),
- siaddr_(IOAddress("0.0.0.0")),
- giaddr_(IOAddress("0.0.0.0")),
- bufferIn_(0), // not used, this is TX packet
- bufferOut_(DHCPV4_PKT_HDR_LEN),
+ ciaddr_(DEFAULT_ADDRESS),
+ yiaddr_(DEFAULT_ADDRESS),
+ siaddr_(DEFAULT_ADDRESS),
+ giaddr_(DEFAULT_ADDRESS),
+ bufferOut_(0), // not used, this is RX packet
msg_type_(DHCPDISCOVER)
{
if (len < DHCPV4_PKT_HDR_LEN) {
isc_throw(OutOfRange, "Truncated DHCPv4 packet (len=" << len
- << " received, at least 236 bytes expected.");
+ << ") received, at least " << DHCPV4_PKT_HDR_LEN
+ << " is expected.");
}
- bufferIn_.writeData(data, len);
+
+ data_.resize(len);
+ memcpy(&data_[0], data, len);
}
size_t
Pkt4::len() {
size_t length = DHCPV4_PKT_HDR_LEN; // DHCPv4 header
- /// TODO: Include options here (ticket #1228)
+ // ... and sum of lengths of all options
+ for (Option::OptionCollection::const_iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+
return (length);
}
bool
Pkt4::pack() {
- /// TODO: Implement this (ticket #1227)
+ bufferOut_.writeUint8(op_);
+ bufferOut_.writeUint8(htype_);
+ bufferOut_.writeUint8(hlen_);
+ bufferOut_.writeUint8(hops_);
+ bufferOut_.writeUint32(transid_);
+ bufferOut_.writeUint16(secs_);
+ bufferOut_.writeUint16(flags_);
+ bufferOut_.writeUint32(ciaddr_);
+ bufferOut_.writeUint32(yiaddr_);
+ bufferOut_.writeUint32(siaddr_);
+ bufferOut_.writeUint32(giaddr_);
+ bufferOut_.writeData(chaddr_, MAX_CHADDR_LEN);
+ bufferOut_.writeData(sname_, MAX_SNAME_LEN);
+ bufferOut_.writeData(file_, MAX_FILE_LEN);
- return (false);
+ // write DHCP magic cookie
+ bufferOut_.writeUint32(DHCP_OPTIONS_COOKIE);
+
+ LibDHCP::packOptions(bufferOut_, options_);
+
+ // add END option that indicates end of options
+ // (End option is very simple, just a 255 octet)
+ bufferOut_.writeUint8(DHO_END);
+
+ return (true);
}
-bool
+
+void
Pkt4::unpack() {
- /// TODO: Implement this (ticket #1226)
- return (false);
+ // input buffer (used during message reception)
+ isc::util::InputBuffer bufferIn(&data_[0], data_.size());
+
+ if (bufferIn.getLength() < DHCPV4_PKT_HDR_LEN) {
+ isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
+ << bufferIn.getLength() << " received, at least "
+ << DHCPV4_PKT_HDR_LEN << " is expected");
+ }
+
+ op_ = bufferIn.readUint8();
+ htype_ = bufferIn.readUint8();
+ hlen_ = bufferIn.readUint8();
+ hops_ = bufferIn.readUint8();
+ transid_ = bufferIn.readUint32();
+ secs_ = bufferIn.readUint16();
+ flags_ = bufferIn.readUint16();
+ ciaddr_ = IOAddress(bufferIn.readUint32());
+ yiaddr_ = IOAddress(bufferIn.readUint32());
+ siaddr_ = IOAddress(bufferIn.readUint32());
+ giaddr_ = IOAddress(bufferIn.readUint32());
+ bufferIn.readData(chaddr_, MAX_CHADDR_LEN);
+ bufferIn.readData(sname_, MAX_SNAME_LEN);
+ bufferIn.readData(file_, MAX_FILE_LEN);
+
+ if (bufferIn.getLength() == bufferIn.getPosition()) {
+ // this is *NOT* a DHCP packet. It does not have any DHCPv4 options. In
+ // particular, it does not have the magic cookie, a 4-byte sequence that
+ // differentiates between DHCP and BOOTP packets.
+ isc_throw(InvalidOperation, "Recevied BOOTP packet. BOOTP is not supported.");
+ }
+
+ if (bufferIn.getLength() - bufferIn.getPosition() < 4) {
+ // there is not enough data to hold magic DHCP cookie
+ isc_throw(Unexpected, "Truncated or no DHCP packet.");
+ }
+
+ uint32_t magic = bufferIn.readUint32();
+ if (magic != DHCP_OPTIONS_COOKIE) {
+ isc_throw(Unexpected, "Invalid or missing DHCP magic cookie");
+ }
+
+ size_t opts_len = bufferIn.getLength() - bufferIn.getPosition();
+ vector<uint8_t> optsBuffer;
+
+ // First use of readVector.
+ bufferIn.readVector(optsBuffer, opts_len);
+ LibDHCP::unpackOptions4(optsBuffer, options_);
+
+ // TODO: check will need to be called separately, so hooks can be called after
+ // packet is parsed, but before its content is verified
+ check();
+}
+
+void Pkt4::check() {
+ boost::shared_ptr<Option> typeOpt = getOption(DHO_DHCP_MESSAGE_TYPE);
+ if (typeOpt) {
+ uint8_t msg_type = typeOpt->getUint8();
+ if (msg_type>DHCPLEASEACTIVE) {
+ isc_throw(BadValue, "Invalid DHCP message type received:" << msg_type);
+ }
+ msg_type_ = msg_type;
+
+ } else {
+ isc_throw(Unexpected, "Missing DHCP Message Type option");
+ }
+}
+
+void Pkt4::repack() {
+ cout << "Convering RX packet to TX packet: " << data_.size() << " bytes." << endl;
+
+ bufferOut_.writeData(&data_[0], data_.size());
}
std::string
Pkt4::toText() {
stringstream tmp;
- tmp << "localAddr=[" << local_addr_.toText() << "]:" << local_port_
- << " remoteAddr=[" << remote_addr_.toText()
- << "]:" << remote_port_ << endl;
- tmp << "msgtype=" << msg_type_
- << ", transid=0x" << hex << transid_ << dec
- << endl;
+ tmp << "localAddr=" << local_addr_.toText() << ":" << local_port_
+ << " remoteAddr=" << remote_addr_.toText()
+ << ":" << remote_port_ << ", msgtype=" << int(msg_type_)
+ << ", transid=0x" << hex << transid_ << dec << endl;
+
+ for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
+ opt != options_.end();
+ ++opt) {
+ tmp << " " << opt->second->toText() << std::endl;
+ }
+
return tmp.str();
}
void
-Pkt4::setHWAddr(uint8_t hType, uint8_t hlen, const uint8_t* macAddr) {
+Pkt4::setHWAddr(uint8_t hType, uint8_t hlen,
+ const std::vector<uint8_t>& macAddr) {
/// TODO Rewrite this once support for client-identifier option
/// is implemented (ticket 1228?)
if (hlen>MAX_CHADDR_LEN) {
isc_throw(OutOfRange, "Hardware address (len=" << hlen
<< " too long. Max " << MAX_CHADDR_LEN << " supported.");
}
+ if ( (macAddr.size() == 0) && (hlen > 0) ) {
+ isc_throw(OutOfRange, "Invalid HW Address specified");
+ }
+
htype_ = hType;
hlen_ = hlen;
memset(chaddr_, 0, MAX_CHADDR_LEN);
- memcpy(chaddr_, macAddr, hlen);
+ memcpy(chaddr_, &macAddr[0], hlen);
}
void
@@ -151,7 +260,7 @@ Pkt4::setFile(const uint8_t* file, size_t fileLen /*= MAX_FILE_LEN*/) {
memset(file_, 0, MAX_FILE_LEN);
memcpy(file_, file, fileLen);
- // no need to store snameLen as any empty space is filled with 0s
+ // no need to store fileLen as any empty space is filled with 0s
}
uint8_t
@@ -177,4 +286,30 @@ Pkt4::DHCPTypeToBootpType(uint8_t dhcpType) {
}
}
-};
+void
+Pkt4::addOption(boost::shared_ptr<Option> opt) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Pkt4::getOption(uint8_t type) {
+ Option::OptionCollection::const_iterator x = options_.find(type);
+ if (x!=options_.end()) {
+ return (*x).second;
+ }
+ return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+void
+Pkt4::updateTimestamp() {
+ timestamp_ = boost::posix_time::microsec_clock::universal_time();
+}
+
+} // end of namespace isc::dhcp
+
+} // end of namespace isc
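To show the TX path introduced above end to end, a small sketch based only on the Pkt4 methods visible in this diff. DHCPDISCOVER is assumed to come from dhcp4.h (it is referenced in the code above); the transaction id and hardware address are illustrative:

    #include <dhcp/dhcp4.h>
    #include <dhcp/pkt4.h>
    #include <stdint.h>
    #include <vector>

    void pkt4_tx_example() {
        using namespace isc::dhcp;

        Pkt4 pkt(DHCPDISCOVER, 0x12345678 /* transid */);

        // The hardware address is now passed as a vector (see setHWAddr() above).
        std::vector<uint8_t> mac(6);
        mac[0] = 0x00; mac[1] = 0x0c; mac[2] = 0x01;
        mac[3] = 0x02; mac[4] = 0x03; mac[5] = 0x04;
        pkt.setHWAddr(1 /* Ethernet */, static_cast<uint8_t>(mac.size()), mac);

        if (pkt.pack()) {
            // Header, magic cookie, options and DHO_END are now in the buffer.
            const isc::util::OutputBuffer& wire = pkt.getBuffer();
            (void)wire;
        }
    }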
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index cc20ad8..b72c03e 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -16,8 +16,10 @@
#define PKT4_H
#include <iostream>
+#include <time.h>
+#include <vector>
#include <boost/shared_ptr.hpp>
-#include <boost/shared_array.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp>
#include "asiolink/io_address.h"
#include "util/buffer.h"
#include "dhcp/option.h"
@@ -29,180 +31,196 @@ namespace dhcp {
class Pkt4 {
public:
- // length of the CHADDR field in DHCPv4 message
+ /// length of the CHADDR field in DHCPv4 message
const static size_t MAX_CHADDR_LEN = 16;
- // length of the SNAME field in DHCPv4 message
+ /// length of the SNAME field in DHCPv4 message
const static size_t MAX_SNAME_LEN = 64;
- // length of the FILE field in DHCPv4 message
+ /// length of the FILE field in DHCPv4 message
const static size_t MAX_FILE_LEN = 128;
- /// specifes DHCPv4 packet header length (fixed part)
+ /// specifies DHCPv4 packet header length (fixed part)
const static size_t DHCPV4_PKT_HDR_LEN = 236;
- /// Constructor, used in replying to a message
+ /// Constructor, used in replying to a message.
///
/// @param msg_type type of message (e.g. DHCPDISOVER=1)
/// @param transid transaction-id
Pkt4(uint8_t msg_type, uint32_t transid);
- /// Constructor, used in message transmission
+ /// @brief Constructor, used in message reception.
///
- /// Creates new message. Transaction-id will randomized.
+ /// Creates a new message. Pkt4 copies the provided data into an
+ /// internal buffer (data_) on creation.
///
/// @param data pointer to received data
/// @param len size of buffer to be allocated for this packet.
Pkt4(const uint8_t* data, size_t len);
- /// @brief Prepares on-wire format.
+ /// @brief Prepares on-wire format of DHCPv4 packet.
///
/// Prepares on-wire format of message and all its options.
/// Options must be stored in options_ field.
- /// Output buffer will be stored in data_. Length
- /// will be set in data_len_.
+ /// Output buffer will be stored in bufferOut_.
///
/// @return true if packing procedure was successful
bool
pack();
- /// @brief Parses on-wire form of UDP DHCPv6 packet.
+ /// @brief Parses on-wire form of DHCPv4 packet.
+ ///
+ /// Parses received packet, stored in on-wire format in data_.
///
- /// Parses received packet, stored in on-wire format in data_.
- /// data_len_ must be set to indicate data length.
/// Will create a collection of option objects that will
/// be stored in options_ container.
///
- /// @return true, if build was successful
- bool
- unpack();
+ /// This method will throw an exception if packet parsing fails.
+ void unpack();
+
+ /// @brief performs sanity check on a packet.
+ ///
+ /// This is usually performed after unpack(). It checks if packet is sane:
+ /// required options are present, fields have sane content etc.
+ /// For example, it verifies that DHCP_MESSAGE_TYPE is present and has
+ /// a reasonable value. This method is expected to grow significantly.
+ /// It makes sense to separate unpack() and check() for testing purposes.
+ ///
+ /// TODO: It is called from unpack() directly. It should be separated.
+ ///
+ /// Method will throw exception if anomaly is found.
+ void check();
+
+ /// @brief Copies content of input buffer to output buffer.
+ ///
+ /// This is mostly a diagnostic function. It is used to send back a
+ /// received packet. Received data is stored in data_, while
+ /// transmitted data is stored in bufferOut_. To send a packet that
+ /// was just received, a copy between those two buffers is necessary.
+ void repack();
/// @brief Returns text representation of the packet.
///
/// This function is useful mainly for debugging.
///
/// @return string with text representation
- std::string
- toText();
+ std::string toText();
- /// @brief Returns calculated length of the packet.
+ /// @brief Returns the size of the required buffer to build the packet.
///
- /// This function returns size of required buffer to buld this packet.
- /// To use that function, options_ field must be set.
+ /// Returns the size of the required buffer to build the packet with
+ /// the current set of packet options.
///
/// @return number of bytes required to build this packet
- size_t
- len();
+ size_t len();
- /// Sets hops field
+ /// @brief Sets hops field.
///
/// @param hops value to be set
- void
- setHops(uint8_t hops) { hops_ = hops; };
+ void setHops(uint8_t hops) { hops_ = hops; };
- /// Returns hops field
+ /// @brief Returns hops field.
///
/// @return hops field
- uint8_t
- getHops() { return (hops_); };
+ uint8_t getHops() const { return (hops_); };
// Note: There's no need to manipulate OP field directly,
// thus no setOp() method. See op_ comment.
- /// Returns op field
+ /// @brief Returns op field.
///
/// @return op field
- uint8_t
- getOp() { return (op_); };
+ uint8_t getOp() const { return (op_); };
- /// Sets secs field
+ /// @brief Sets secs field.
///
/// @param secs value to be set
- void
- setSecs(uint16_t secs) { secs_ = secs; };
+ void setSecs(uint16_t secs) { secs_ = secs; };
- /// Returns secs field
+ /// @brief Returns secs field.
///
/// @return secs field
- uint16_t
- getSecs() { return (secs_); };
+ uint16_t getSecs() const { return (secs_); };
- /// Sets flags field
+ /// @brief Sets flags field.
///
/// @param flags value to be set
- void
- setFlags(uint16_t flags) { flags_ = flags; };
+ void setFlags(uint16_t flags) { flags_ = flags; };
- /// Returns flags field
+ /// @brief Returns flags field.
///
/// @return flags field
- uint16_t
- getFlags() { return (flags_); };
+ uint16_t getFlags() const { return (flags_); };
- /// Returns ciaddr field
+ /// @brief Returns ciaddr field.
///
/// @return ciaddr field
- isc::asiolink::IOAddress&
- getCiaddr() { return (ciaddr_); };
+ const isc::asiolink::IOAddress&
+ getCiaddr() const { return (ciaddr_); };
- /// Sets ciaddr field
+ /// @brief Sets ciaddr field.
///
/// @param ciaddr value to be set
void
setCiaddr(const isc::asiolink::IOAddress& ciaddr) { ciaddr_ = ciaddr; };
- /// Returns siaddr field
+ /// @brief Returns siaddr field.
///
/// @return siaddr field
- isc::asiolink::IOAddress&
- getSiaddr() { return (siaddr_); };
+ const isc::asiolink::IOAddress&
+ getSiaddr() const { return (siaddr_); };
- /// Sets siaddr field
+ /// @brief Sets siaddr field.
///
/// @param siaddr value to be set
void
setSiaddr(const isc::asiolink::IOAddress& siaddr) { siaddr_ = siaddr; };
- /// Returns yiaddr field
+ /// @brief Returns yiaddr field.
///
/// @return yiaddr field
- isc::asiolink::IOAddress&
- getYiaddr() { return (yiaddr_); };
+ const isc::asiolink::IOAddress&
+ getYiaddr() const { return (yiaddr_); };
- /// Sets yiaddr field
+ /// @brief Sets yiaddr field.
///
/// @param yiaddr value to be set
void
setYiaddr(const isc::asiolink::IOAddress& yiaddr) { yiaddr_ = yiaddr; };
- /// Returns giaddr field
+ /// @brief Returns giaddr field.
///
/// @return giaddr field
- isc::asiolink::IOAddress&
- getGiaddr() { return (giaddr_); };
+ const isc::asiolink::IOAddress&
+ getGiaddr() const { return (giaddr_); };
- /// Sets giaddr field
+ /// @brief Sets giaddr field.
///
/// @param giaddr value to be set
void
setGiaddr(const isc::asiolink::IOAddress& giaddr) { giaddr_ = giaddr; };
- /// Returns value of transaction-id field
+ /// @brief Sets transaction-id value
+ ///
+ /// @param transid transaction-id to be set.
+ void setTransid(uint32_t transid) { transid_ = transid; }
+
+ /// @brief Returns value of transaction-id field.
///
/// @return transaction-id
- uint32_t getTransid() { return (transid_); };
+ uint32_t getTransid() const { return (transid_); };
- /// Returns message type (e.g. 1 = DHCPDISCOVER)
+ /// @brief Returns message type (e.g. 1 = DHCPDISCOVER).
///
/// @return message type
uint8_t
- getType() { return (msg_type_); }
+ getType() const { return (msg_type_); }
- /// Sets message type (e.g. 1 = DHCPDISCOVER)
+ /// @brief Sets message type (e.g. 1 = DHCPDISCOVER).
///
/// @param type message type to be set
void setType(uint8_t type) { msg_type_=type; };
@@ -210,62 +228,187 @@ public:
/// @brief Returns sname field
///
/// Note: This is 64 bytes long field. It doesn't have to be
- /// null-terminated. Do no use strlen() or similar on it.
+ /// null-terminated. Do not use strlen() or similar on it.
///
/// @return sname field
- const uint8_t*
- getSname() { return (sname_); };
+ const OptionBuffer
+ getSname() const { return (std::vector<uint8_t>(sname_, &sname_[MAX_SNAME_LEN])); };
- /// Sets sname field
+ /// @brief Sets sname field.
///
/// @param sname value to be set
- void
- setSname(const uint8_t* sname, size_t snameLen = MAX_SNAME_LEN);
+ /// @param sname_len length of the sname buffer (up to MAX_SNAME_LEN)
+ void setSname(const uint8_t* sname, size_t sname_len = MAX_SNAME_LEN);
/// @brief Returns file field
///
/// Note: This is 128 bytes long field. It doesn't have to be
- /// null-terminated. Do no use strlen() or similar on it.
+ /// null-terminated. Do not use strlen() or similar on it.
///
/// @return pointer to file field
- const uint8_t*
- getFile() { return (file_); };
+ const OptionBuffer
+ getFile() const { return (std::vector<uint8_t>(file_, &file_[MAX_FILE_LEN])); };
/// Sets file field
///
/// @param file value to be set
+ /// @param file_len length of the file buffer (up to MAX_FILE_LEN)
void
- setFile(const uint8_t* file, size_t fileLen = MAX_FILE_LEN);
+ setFile(const uint8_t* file, size_t file_len = MAX_FILE_LEN);
- /// Sets hardware address
+ /// @brief Sets hardware address.
+ ///
+ /// Sets parameters of hardware address. hlen specifies
+ /// length of macAddr buffer. Content of macAddr buffer
+ /// will be copied to appropriate field.
+ ///
+ /// Note: macAddr must be a buffer of at least hlen bytes.
///
- /// @param hwType hardware type (will be sent in htype field)
+ /// @param hType hardware type (will be sent in htype field)
/// @param hlen hardware length (will be sent in hlen field)
/// @param macAddr pointer to hardware address
void setHWAddr(uint8_t hType, uint8_t hlen,
- const uint8_t* macAddr);
+ const std::vector<uint8_t>& macAddr);
/// Returns htype field
///
/// @return hardware type
uint8_t
- getHtype() { return (htype_); };
+ getHtype() const { return (htype_); };
/// Returns hlen field
///
/// @return hardware address length
uint8_t
- getHlen() { return (hlen_); };
+ getHlen() const { return (hlen_); };
- /// @brief Returns chaddr field
+ /// @brief Returns chaddr field.
///
/// Note: This is 16 bytes long field. It doesn't have to be
/// null-terminated. Do no use strlen() or similar on it.
///
/// @return pointer to hardware address
const uint8_t*
- getChaddr() { return (chaddr_); };
+ getChaddr() const { return (chaddr_); };
+
+
+ /// @brief Returns reference to output buffer.
+ ///
+ /// The returned buffer contains meaningful data only for an
+ /// output (TX) packet and only after pack() has been called. The
+ /// buffer remains valid only as long as the Pkt4 object is valid.
+ ///
+ /// For an RX packet, or a TX packet before pack() is called, the
+ /// returned buffer has zero length.
+ ///
+ /// @return reference to output buffer
+ const isc::util::OutputBuffer&
+ getBuffer() const { return (bufferOut_); };
+
+ /// @brief Add an option.
+ ///
+ /// Throws BadValue if option with that type is already present.
+ ///
+ /// @param opt option to be added
+ void
+ addOption(boost::shared_ptr<Option> opt);
+
+ /// @brief Returns an option of specified type.
+ ///
+ /// @return returns option of requested type (or NULL)
+ /// if no such option is present
+ boost::shared_ptr<Option>
+ getOption(uint8_t opt_type);
+
+ /// @brief Returns interface name.
+ ///
+ /// Returns interface name over which packet was received or is
+ /// going to be transmitted.
+ ///
+ /// @return interface name
+ std::string getIface() const { return iface_; };
+ /// @brief Returns packet timestamp.
+ ///
+ /// Returns the packet timestamp, updated when the
+ /// packet is received or sent.
+ ///
+ /// @return packet timestamp.
+ const boost::posix_time::ptime& getTimestamp() const { return timestamp_; }
+
+ /// @brief Sets interface name.
+ ///
+ /// Sets interface name over which packet was received or is
+ /// going to be transmitted.
+ ///
+ /// @param iface interface name
+ void setIface(const std::string& iface ) { iface_ = iface; };
+
+ /// @brief Sets interface index.
+ ///
+ /// @param ifindex specifies interface index.
+ void setIndex(uint32_t ifindex) { ifindex_ = ifindex; };
+
+ /// @brief Returns interface index.
+ ///
+ /// @return interface index
+ uint32_t getIndex() const { return (ifindex_); };
+
+ /// @brief Sets remote address.
+ ///
+ /// @param remote specifies remote address
+ void setRemoteAddr(const isc::asiolink::IOAddress& remote) {
+ remote_addr_ = remote;
+ }
+
+ /// @brief Returns remote address
+ ///
+ /// @return remote address
+ const isc::asiolink::IOAddress& getRemoteAddr() {
+ return (remote_addr_);
+ }
+
+ /// @brief Sets local address.
+ ///
+ /// @param local specifies local address
+ void setLocalAddr(const isc::asiolink::IOAddress& local) {
+ local_addr_ = local;
+ }
+
+ /// @brief Returns local address.
+ ///
+ /// @return local address
+ const isc::asiolink::IOAddress& getLocalAddr() {
+ return (local_addr_);
+ }
+
+ /// @brief Sets local port.
+ ///
+ /// @param local specifies local port
+ void setLocalPort(uint16_t local) { local_port_ = local; }
+
+ /// @brief Returns local port.
+ ///
+ /// @return local port
+ uint16_t getLocalPort() { return (local_port_); }
+
+ /// @brief Sets remote port.
+ ///
+ /// @param remote specifies remote port
+ void setRemotePort(uint16_t remote) { remote_port_ = remote; }
+
+ /// @brief Returns remote port.
+ ///
+ /// @return remote port
+ uint16_t getRemotePort() { return (remote_port_); }
+
+ /// @brief Update packet timestamp.
+ ///
+ /// Updates packet timestamp. This method is invoked
+ /// by interface manager just before sending or
+ /// just after receiving it.
+ /// @throw isc::Unexpected if timestamp update failed
+ void updateTimestamp();
protected:
@@ -288,22 +431,24 @@ protected:
/// @brief interface index
///
- /// interface index (each network interface has assigned unique ifindex
- /// it is functional equvalent of name, but sometimes more useful, e.g.
- /// when using crazy systems that allow spaces in interface names
- /// e.g. windows
- int ifindex_;
+ /// Each network interface has a unique ifindex assigned. It is a
+ /// functional equivalent of the name, but sometimes more useful, e.g.
+ /// on systems that allow spaces in interface names (e.g. MS Windows).
+ uint32_t ifindex_;
/// local UDP port
- int local_port_;
+ uint16_t local_port_;
/// remote UDP port
- int remote_port_;
+ uint16_t remote_port_;
- /// message operation code (kept due to BOOTP format, this is NOT DHCPv4 type)
+ /// @brief message operation code
///
- /// Note: This is legacy BOOTP field. There's no need to manipulate it
- /// directly. Its value is set based on DHCP message type.
+ /// Note: This is a legacy BOOTP field. There is no need to manipulate
+ /// it directly; its value is set based on the DHCP message type. The
+ /// DHCPv4 protocol reuses the BOOTP message format, which is why this
+ /// field is retained. It is NOT the DHCPv4 message type (the DHCPv4
+ /// message type is carried in the message type option).
uint8_t op_;
/// link-layer address type
@@ -324,46 +469,73 @@ protected:
/// flags
uint16_t flags_;
- // ciaddr field (32 bits): Client's IP address
+ /// ciaddr field (32 bits): Client's IP address
isc::asiolink::IOAddress ciaddr_;
- // yiaddr field (32 bits): Client's IP address ("your"), set by server
+ /// yiaddr field (32 bits): Client's IP address ("your"), set by server
isc::asiolink::IOAddress yiaddr_;
- // siaddr field (32 bits): next server IP address in boot process(e.g.TFTP)
+ /// siaddr field (32 bits): next server IP address in boot process(e.g.TFTP)
isc::asiolink::IOAddress siaddr_;
- // giaddr field (32 bits): Gateway IP address
+ /// giaddr field (32 bits): Gateway IP address
isc::asiolink::IOAddress giaddr_;
- // ciaddr field (32 bits): Client's IP address
- uint8_t chaddr_[16];
-
- // sname 64 bytes
- uint8_t sname_[64];
+ /// Hardware address field (16 bytes)
+ uint8_t chaddr_[MAX_CHADDR_LEN];
- // file
- uint8_t file_[128];
+ /// sname field (64 bytes)
+ uint8_t sname_[MAX_SNAME_LEN];
- // end of real DHCPv4 fields
+ /// file field (128 bytes)
+ uint8_t file_[MAX_FILE_LEN];
- /// input buffer (used during message reception)
- /// Note that it must be modifiable as hooks can modify incoming buffer),
- /// thus OutputBuffer, not InputBuffer
- isc::util::OutputBuffer bufferIn_;
+ // end of real DHCPv4 fields
- /// output buffer (used during message
+ /// output buffer (used during message transmission)
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One such derived class is
+ /// @ref perfdhcp::PerfPkt4. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
isc::util::OutputBuffer bufferOut_;
+ /// This holds the data of the input buffer used for an RX packet. Note that
+ /// InputBuffer does not store the data itself, but just expects that
+ /// data will be valid for the whole life of InputBuffer. Therefore we
+ /// need to keep the data around.
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One such derived class is
+ /// @ref perfdhcp::PerfPkt4. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
+ std::vector<uint8_t> data_;
+
/// message type (e.g. 1=DHCPDISCOVER)
- /// TODO: this will eventually be replaced with DHCP Message Type
+ /// TODO: this will eventually be replaced with DHCP Message Type
/// option (option 53)
uint8_t msg_type_;
/// collection of options present in this message
- isc::dhcp::Option::Option4Collection options_;
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One such derived class is
+ /// @ref perfdhcp::PerfPkt4. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
+ isc::dhcp::Option::OptionCollection options_;
+
+ /// packet timestamp
+ boost::posix_time::ptime timestamp_;
}; // Pkt4 class
+typedef boost::shared_ptr<Pkt4> Pkt4Ptr;
+
} // isc::dhcp namespace
} // isc namespace
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
index dbc6c8e..a65170d 100644
--- a/src/lib/dhcp/pkt6.cc
+++ b/src/lib/dhcp/pkt6.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,58 +12,52 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include "config.h"
-#include "dhcp/dhcp6.h"
-#include "dhcp/pkt6.h"
-#include "dhcp/libdhcp.h"
-#include "exceptions/exceptions.h"
+#include <config.h>
+
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/libdhcp++.h>
+#include <exceptions/exceptions.h>
#include <iostream>
#include <sstream>
using namespace std;
-using namespace isc::dhcp;
namespace isc {
-
-Pkt6::Pkt6(unsigned int dataLen, DHCPv6Proto proto /* = UDP */)
- :data_len_(dataLen),
- local_addr_("::"),
- remote_addr_("::"),
- iface_(""),
- ifindex_(-1),
- local_port_(-1),
- remote_port_(-1),
- proto_(proto),
- msg_type_(-1),
- transid_(rand()%0xffffff)
-{
-
- data_ = boost::shared_array<uint8_t>(new uint8_t[dataLen]);
- data_len_ = dataLen;
+namespace dhcp {
+
+Pkt6::Pkt6(const uint8_t* buf, uint32_t buf_len, DHCPv6Proto proto /* = UDP */) :
+ proto_(proto),
+ msg_type_(0),
+ transid_(rand()%0xffffff),
+ iface_(""),
+ ifindex_(-1),
+ local_addr_("::"),
+ remote_addr_("::"),
+ local_port_(0),
+ remote_port_(0),
+ bufferOut_(0) {
+ data_.resize(buf_len);
+ memcpy(&data_[0], buf, buf_len);
}
-Pkt6::Pkt6(uint8_t msg_type,
- unsigned int transid,
- DHCPv6Proto proto /*= UDP*/)
- :local_addr_("::"),
- remote_addr_("::"),
- iface_(""),
- ifindex_(-1),
- local_port_(-1),
- remote_port_(-1),
- proto_(proto),
- msg_type_(msg_type),
- transid_(transid) {
-
- data_ = boost::shared_array<uint8_t>(new uint8_t[4]);
- data_len_ = 4;
+Pkt6::Pkt6(uint8_t msg_type, uint32_t transid, DHCPv6Proto proto /*= UDP*/) :
+ proto_(proto),
+ msg_type_(msg_type),
+ transid_(transid),
+ iface_(""),
+ ifindex_(-1),
+ local_addr_("::"),
+ remote_addr_("::"),
+ local_port_(0),
+ remote_port_(0),
+ bufferOut_(0) {
}
-unsigned short
-Pkt6::len() {
- unsigned int length = DHCPV6_PKT_HDR_LEN; // DHCPv6 header
+uint16_t Pkt6::len() {
+ uint16_t length = DHCPV6_PKT_HDR_LEN; // DHCPv6 header
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
@@ -88,43 +82,28 @@ Pkt6::pack() {
bool
Pkt6::packUDP() {
- unsigned short length = len();
- if (data_len_ < length) {
- cout << "Previous len=" << data_len_ << ", allocating new buffer: len="
- << length << endl;
-
- // May throw exception if out of memory. That is rather fatal,
- // so we don't catch this
- data_ = boost::shared_array<uint8_t>(new uint8_t[length]);
- data_len_ = length;
- }
- data_len_ = length;
+ // TODO: Once OutputBuffer is used here, something like this
+ // will be used. Yikes! That's ugly.
+ // bufferOut_.writeData(ciaddr_.getAddress().to_v6().to_bytes().data(), 16);
+ // It is better to implement a method in IOAddress that extracts
+ // vector<uint8_t>
+
try {
// DHCPv6 header: message-type (1 octect) + transaction id (3 octets)
- data_[0] = msg_type_;
-
+ bufferOut_.writeUint8(msg_type_);
// store 3-octet transaction-id
- data_[1] = (transid_ >> 16) & 0xff;
- data_[2] = (transid_ >> 8) & 0xff;
- data_[3] = (transid_) & 0xff;
+ bufferOut_.writeUint8( (transid_ >> 16) & 0xff );
+ bufferOut_.writeUint8( (transid_ >> 8) & 0xff );
+ bufferOut_.writeUint8( (transid_) & 0xff );
// the rest are options
- unsigned short offset = LibDHCP::packOptions6(data_, length,
- 4/*offset*/,
- options_);
-
- // sanity check
- if (offset != length) {
- isc_throw(OutOfRange, "Packet build failed: expected size="
- << length << ", actual len=" << offset);
- }
+ LibDHCP::packOptions6(bufferOut_, options_);
}
catch (const Exception& e) {
cout << "Packet build failed:" << e.what() << endl;
return (false);
}
- cout << "Packet built, len=" << len() << endl;
return (true);
}
@@ -150,8 +129,8 @@ Pkt6::unpack() {
bool
Pkt6::unpackUDP() {
- if (data_len_ < 4) {
- std::cout << "DHCPv6 packet truncated. Only " << data_len_
+ if (data_.size() < 4) {
+ std::cout << "DHCPv6 packet truncated. Only " << data_.size()
<< " bytes. Need at least 4." << std::endl;
return (false);
}
@@ -160,16 +139,13 @@ Pkt6::unpackUDP() {
((data_[2]) << 8) + (data_[3]);
transid_ = transid_ & 0xffffff;
- unsigned int offset = LibDHCP::unpackOptions6(data_,
- data_len_,
- 4, //offset
- data_len_ - 4,
- options_);
- if (offset != data_len_) {
- cout << "DHCPv6 packet contains trailing garbage. Parsed "
- << offset << " bytes, packet is " << data_len_ << " bytes."
- << endl;
- // just a warning. Ignore trailing garbage and continue
+ try {
+ OptionBuffer opt_buffer(data_.begin() + 4, data_.end());
+
+ LibDHCP::unpackOptions6(opt_buffer, options_);
+ } catch (const Exception& e) {
+ cout << "Packet parsing failed:" << e.what() << endl;
+ return (false);
}
return (true);
}
@@ -189,7 +165,7 @@ Pkt6::toText() {
<< "]:" << remote_port_ << endl;
tmp << "msgtype=" << msg_type_ << ", transid=0x" << hex << transid_
<< dec << endl;
- for (isc::dhcp::Option::Option6Collection::iterator opt=options_.begin();
+ for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
opt != options_.end();
++opt) {
tmp << opt->second->toText() << std::endl;
@@ -198,8 +174,8 @@ Pkt6::toText() {
}
boost::shared_ptr<isc::dhcp::Option>
-Pkt6::getOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::const_iterator x = options_.find(opt_type);
+Pkt6::getOption(uint16_t opt_type) {
+ isc::dhcp::Option::OptionCollection::const_iterator x = options_.find(opt_type);
if (x!=options_.end()) {
return (*x).second;
}
@@ -212,8 +188,8 @@ Pkt6::addOption(boost::shared_ptr<Option> opt) {
}
bool
-Pkt6::delOption(unsigned short type) {
- isc::dhcp::Option::Option6Collection::iterator x = options_.find(type);
+Pkt6::delOption(uint16_t type) {
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(type);
if (x!=options_.end()) {
options_.erase(x);
return (true); // delete successful
@@ -221,4 +197,17 @@ Pkt6::delOption(unsigned short type) {
return (false); // can't find option to be deleted
}
-};
+void Pkt6::repack() {
+ cout << "Convering RX packet to TX packet: " << data_.size() << " bytes." << endl;
+
+ bufferOut_.writeData(&data_[0], data_.size());
+}
+
+void
+Pkt6::updateTimestamp() {
+ timestamp_ = boost::posix_time::microsec_clock::universal_time();
+}
+
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
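For symmetry with the DHCPv4 example, a sketch of the revised Pkt6 TX path using only calls shown in this diff; the message type 1 (SOLICIT) and the 3-octet transaction id are literals here rather than named constants:

    #include <dhcp/pkt6.h>
    #include <util/buffer.h>

    void pkt6_tx_example() {
        // Message type 1 (SOLICIT) and a 3-octet transaction id.
        isc::dhcp::Pkt6 pkt(1 /* SOLICIT */, 0x000abc);
        if (pkt.pack()) {
            // One type byte, three transid bytes and the packed options
            // are now available through the output buffer.
            const isc::util::OutputBuffer& wire = pkt.getBuffer();
            (void)wire;
        }
    }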
diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h
index d089444..2612f27 100644
--- a/src/lib/dhcp/pkt6.h
+++ b/src/lib/dhcp/pkt6.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -16,8 +16,10 @@
#define PKT6_H
#include <iostream>
+#include <time.h>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp>
#include "asiolink/io_address.h"
#include "dhcp/option.h"
@@ -41,17 +43,18 @@ public:
/// @param msg_type type of message (SOLICIT=1, ADVERTISE=2, ...)
/// @param transid transaction-id
/// @param proto protocol (TCP or UDP)
- Pkt6(unsigned char msg_type,
- unsigned int transid,
+ Pkt6(uint8_t msg_type,
+ uint32_t transid,
DHCPv6Proto proto = UDP);
/// Constructor, used in message transmission
///
/// Creates new message. Transaction-id will randomized.
///
- /// @param len size of buffer to be allocated for this packet.
+ /// @param buf pointer to a buffer of received packet content
+ /// @param len size of buffer of received packet content
/// @param proto protocol (usually UDP, but TCP will be supported eventually)
- Pkt6(unsigned int len, DHCPv6Proto proto = UDP);
+ Pkt6(const uint8_t* buf, uint32_t len, DHCPv6Proto proto = UDP);
/// @brief Prepares on-wire format.
///
@@ -61,8 +64,7 @@ public:
/// will be set in data_len_.
///
/// @return true if packing procedure was successful
- bool
- pack();
+ bool pack();
/// @brief Dispatch method that handles binary packet parsing.
///
@@ -70,59 +72,79 @@ public:
/// unpackTCP).
///
/// @return true if parsing was successful
- bool
- unpack();
+ bool unpack();
- /// Returns protocol of this packet (UDP or TCP)
+ /// @brief Returns reference to output buffer.
+ ///
+ /// The returned buffer contains meaningful data only for an
+ /// output (TX) packet and only after pack() has been called. The
+ /// buffer remains valid only as long as the Pkt6 object is valid.
+ ///
+ /// For an RX packet, or a TX packet before pack() is called, the
+ /// returned buffer has zero length.
+ ///
+ /// @return reference to output buffer
+ const isc::util::OutputBuffer& getBuffer() const { return (bufferOut_); };
+
+
+ /// @brief Returns reference to input buffer.
+ ///
+ /// @return reference to input buffer
+ const OptionBuffer& getData() const { return(data_); }
+
+ /// @brief Returns protocol of this packet (UDP or TCP).
///
/// @return protocol type
- DHCPv6Proto
- getProto();
+ DHCPv6Proto getProto();
/// Sets protocol of this packet.
///
/// @param proto protocol (UDP or TCP)
- ///
- void
- setProto(DHCPv6Proto proto = UDP) { proto_ = proto; }
+ void setProto(DHCPv6Proto proto = UDP) { proto_ = proto; }
/// @brief Returns text representation of the packet.
///
/// This function is useful mainly for debugging.
///
/// @return string with text representation
- std::string
- toText();
+ std::string toText();
- /// @brief Returns calculated length of the packet.
+ /// @brief Returns length of the packet.
///
- /// This function returns size of required buffer to buld this packet.
- /// To use that function, options_ field must be set.
+ /// This function returns the size required to hold this packet.
+ /// It includes the DHCPv6 header and all options stored in the
+ /// options_ field.
///
- /// @return number of bytes required to build this packet
- unsigned short
- len();
+ /// Note: It does not return the proper length of an incoming packet
+ /// before it is unpacked.
+ ///
+ /// @return number of bytes required to assemble this packet
+ uint16_t len();
/// Returns message type (e.g. 1 = SOLICIT)
///
/// @return message type
- unsigned char
- getType() { return (msg_type_); }
+ uint8_t getType() { return (msg_type_); }
/// Sets message type (e.g. 1 = SOLICIT)
///
/// @param type message type to be set
- void setType(unsigned char type) { msg_type_=type; };
+ void setType(uint8_t type) { msg_type_=type; };
+
+ /// @brief Sets transaction-id value
+ ///
+ /// @param transid transaction-id to be set.
+ void setTransid(uint32_t transid) { transid_ = transid; }
/// Returns value of transaction-id field
///
/// @return transaction-id
- unsigned int getTransid() { return (transid_); };
+ uint32_t getTransid() { return (transid_); };
/// Adds an option to this packet.
///
/// @param opt option to be added.
- void addOption(boost::shared_ptr<isc::dhcp::Option> opt);
+ void addOption(OptionPtr opt);
/// @brief Returns the first option of specified type.
///
@@ -130,57 +152,116 @@ public:
/// instances of the same option are allowed (and frequently used).
/// See getOptions().
///
- /// @param opt_type option type we are looking for
+ /// @param type option type we are looking for
///
/// @return pointer to found option (or NULL)
- boost::shared_ptr<isc::dhcp::Option>
- getOption(unsigned short type);
+ OptionPtr getOption(uint16_t type);
/// Attempts to delete first suboption of requested type
///
/// @param type Type of option to be deleted.
///
/// @return true if option was deleted, false if no such option existed
- bool
- delOption(unsigned short type);
+ bool delOption(uint16_t type);
- /// TODO need getter/setter wrappers
- /// and hide following fields as protected
+ /// @brief This method copies data from the input buffer to the output buffer
+ ///
+ /// This is useful only in testing.
+ void repack();
- /// buffer that holds memory. It is shared_array as options may
- /// share pointer to this buffer
- boost::shared_array<uint8_t> data_;
+ /// @brief Sets remote address.
+ ///
+ /// @param remote specifies remote address
+ void setRemoteAddr(const isc::asiolink::IOAddress& remote) { remote_addr_ = remote; }
- /// length of the data
- unsigned int data_len_;
+ /// @brief Returns remote address
+ ///
+ /// @return remote address
+ const isc::asiolink::IOAddress& getRemoteAddr() { return (remote_addr_); }
- /// local address (dst if receiving packet, src if sending packet)
- isc::asiolink::IOAddress local_addr_;
+ /// @brief Sets local address.
+ ///
+ /// @param local specifies local address
+ void setLocalAddr(const isc::asiolink::IOAddress& local) { local_addr_ = local; }
- /// remote address (src if receiving packet, dst if sending packet)
- isc::asiolink::IOAddress remote_addr_;
+ /// @brief Returns local address.
+ ///
+ /// @return local address
+ const isc::asiolink::IOAddress& getLocalAddr() { return (local_addr_); }
- /// name of the network interface the packet was received/to be sent over
- std::string iface_;
+ /// @brief Sets local port.
+ ///
+ /// @param local specifies local port
+ void setLocalPort(uint16_t local) { local_port_ = local; }
- /// @brief interface index
+ /// @brief Returns local port.
///
- /// interface index (each network interface has assigned unique ifindex
- /// it is functional equvalent of name, but sometimes more useful, e.g.
- /// when using crazy systems that allow spaces in interface names
- /// e.g. windows
- int ifindex_;
+ /// @return local port
+ uint16_t getLocalPort() { return (local_port_); }
- /// local TDP or UDP port
- int local_port_;
+ /// @brief Sets remote port.
+ ///
+ /// @param remote specifies remote port
+ void setRemotePort(uint16_t remote) { remote_port_ = remote; }
- /// remote TCP or UDP port
- int remote_port_;
+ /// @brief Returns remote port.
+ ///
+ /// @return remote port
+ uint16_t getRemotePort() { return (remote_port_); }
+
+ /// @brief Sets interface index.
+ ///
+ /// @param ifindex specifies interface index.
+ void setIndex(uint32_t ifindex) { ifindex_ = ifindex; };
+
+ /// @brief Returns interface index.
+ ///
+ /// @return interface index
+ uint32_t getIndex() const { return (ifindex_); };
+
+ /// @brief Returns interface name.
+ ///
+ /// Returns interface name over which packet was received or is
+ /// going to be transmitted.
+ ///
+ /// @return interface name
+ std::string getIface() const { return iface_; };
+
+ /// @brief Returns packet timestamp.
+ ///
+ /// Returns the packet timestamp, updated when the
+ /// packet is received or sent.
+ ///
+ /// @return packet timestamp.
+ const boost::posix_time::ptime& getTimestamp() const { return timestamp_; }
+
+ /// @brief Sets interface name.
+ ///
+ /// Sets interface name over which packet was received or is
+ /// going to be transmitted.
+ ///
+ /// @param iface interface name
+ void setIface(const std::string& iface ) { iface_ = iface; };
/// TODO Need to implement getOptions() as well
/// collection of options present in this message
- isc::dhcp::Option::Option6Collection options_;
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt6. The impact on derived clasess'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
+ isc::dhcp::Option::OptionCollection options_;
+
+ /// @brief Update packet timestamp.
+ ///
+ /// Updates the packet timestamp. This method is invoked
+ /// by the interface manager just before sending a packet or
+ /// just after receiving it.
+ /// @throw isc::Unexpected if timestamp update failed
+ void updateTimestamp();
protected:
/// Builds on wire packet for TCP transmission.
@@ -221,12 +302,60 @@ protected:
DHCPv6Proto proto_;
/// DHCPv6 message type
- int msg_type_;
+ uint8_t msg_type_;
/// DHCPv6 transaction-id
- unsigned int transid_;
+ uint32_t transid_;
+
+ /// unparsed data (in received packets)
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt6. The impact on derived clasess'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
+ OptionBuffer data_;
+
+ /// name of the network interface the packet was received/to be sent over
+ std::string iface_;
+
+ /// @brief interface index
+ ///
+ /// interface index (each network interface has a unique ifindex
+ /// assigned; it is a functional equivalent of the name, but sometimes
+ /// more useful, e.g. on systems that allow spaces in interface names,
+ /// such as Windows)
+ int ifindex_;
+
+ /// local address (dst if receiving packet, src if sending packet)
+ isc::asiolink::IOAddress local_addr_;
+
+ /// remote address (src if receiving packet, dst if sending packet)
+ isc::asiolink::IOAddress remote_addr_;
+
+ /// local TCP or UDP port
+ uint16_t local_port_;
+
+ /// remote TCP or UDP port
+ uint16_t remote_port_;
+
+ /// output buffer (used during message transmission)
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One such derived class is
+ /// @ref perfdhcp::PerfPkt6. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
+ isc::util::OutputBuffer bufferOut_;
+
+ /// packet timestamp
+ boost::posix_time::ptime timestamp_;
}; // Pkt6 class
+typedef boost::shared_ptr<Pkt6> Pkt6Ptr;
+
} // isc::dhcp namespace
} // isc namespace
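And the matching RX sketch with the new Pkt6Ptr typedef and uint16_t option codes; here unpack() reports failure via its bool return rather than an exception. OptionPtr is assumed to be the shared-pointer typedef from option.h used in the declarations above, and the IA_NA code (3) and function name are illustrative:

    #include <dhcp/pkt6.h>
    #include <stdint.h>

    bool handle_pkt6_example(const uint8_t* buf, uint32_t len) {
        isc::dhcp::Pkt6Ptr pkt(new isc::dhcp::Pkt6(buf, len));
        if (!pkt->unpack()) {
            return (false);                  // truncated packet or bad options
        }
        // getOption() now takes a uint16_t code and returns an OptionPtr.
        isc::dhcp::OptionPtr ia = pkt->getOption(3 /* IA_NA */);
        return (ia ? true : false);
    }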
diff --git a/src/lib/dhcp/tests/.gitignore b/src/lib/dhcp/tests/.gitignore
new file mode 100644
index 0000000..313429d
--- /dev/null
+++ b/src/lib/dhcp/tests/.gitignore
@@ -0,0 +1 @@
+/libdhcp++_unittests
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 41cabba..46225e4 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -1,43 +1,61 @@
SUBDIRS = .
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/asiolink
AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/lib/dhcp/tests\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
+
AM_CXXFLAGS = $(B10_CXXFLAGS)
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
if USE_STATIC_LINK
AM_LDFLAGS = -static
endif
CLEANFILES = *.gcno *.gcda
+TESTS_ENVIRONMENT = \
+ $(LIBTOOL) --mode=execute $(VALGRIND_COMMAND)
+
TESTS =
if HAVE_GTEST
-TESTS += libdhcp_unittests
-libdhcp_unittests_SOURCES = run_unittests.cc
-libdhcp_unittests_SOURCES += ../libdhcp.h ../libdhcp.cc libdhcp_unittest.cc
-libdhcp_unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
-libdhcp_unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
-libdhcp_unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
-libdhcp_unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
-libdhcp_unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
-libdhcp_unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
-
-libdhcp_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
-libdhcp_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-
-libdhcp_unittests_CXXFLAGS = $(AM_CXXFLAGS)
+TESTS += libdhcp++_unittests
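+# NOTE: Automake canonicalizes the '+' characters in the program name into
+# underscores, which is why the per-target variables below are spelled
+# libdhcp___unittests.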
+libdhcp___unittests_SOURCES = run_unittests.cc
+libdhcp___unittests_SOURCES += ../libdhcp++.h ../libdhcp++.cc
+libdhcp___unittests_SOURCES += libdhcp++_unittest.cc
+libdhcp___unittests_SOURCES += ../iface_mgr.cc ../iface_mgr.h iface_mgr_unittest.cc
+libdhcp___unittests_SOURCES += ../iface_mgr_linux.cc
+libdhcp___unittests_SOURCES += ../iface_mgr_bsd.cc
+libdhcp___unittests_SOURCES += ../iface_mgr_sun.cc
+libdhcp___unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
+libdhcp___unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
+libdhcp___unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
+libdhcp___unittests_SOURCES += ../option4_addrlst.cc ../option4_addrlst.h option4_addrlst_unittest.cc
+libdhcp___unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
+libdhcp___unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
+libdhcp___unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
+
+libdhcp___unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
+libdhcp___unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+libdhcp___unittests_CXXFLAGS = $(AM_CXXFLAGS)
+
if USE_CLANGPP
# This is to workaround unused variables tcout and tcerr in
-# log4cplus's streams.h.
-libdhcp_unittests_CXXFLAGS += -Wno-unused-variable
+# log4cplus's streams.h and unused parameters from some of the
+# Boost headers.
+libdhcp___unittests_CXXFLAGS += -Wno-unused-variable -Wno-unused-parameter
endif
-libdhcp_unittests_LDADD = $(GTEST_LDADD)
-libdhcp_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
-libdhcp_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
-libdhcp_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-libdhcp_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+libdhcp___unittests_LDADD = $(GTEST_LDADD)
+libdhcp___unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+libdhcp___unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+libdhcp___unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+libdhcp___unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/dhcp/tests/iface_mgr_unittest.cc b/src/lib/dhcp/tests/iface_mgr_unittest.cc
new file mode 100644
index 0000000..e7ccb68
--- /dev/null
+++ b/src/lib/dhcp/tests/iface_mgr_unittest.cc
@@ -0,0 +1,951 @@
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <asiolink/io_address.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/iface_mgr.h>
+#include <dhcp/dhcp4.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+// name of the detected loopback interface (filled in by the loDetect test)
+const size_t buf_size = 32;
+char LOOPBACK[buf_size] = "lo";
+
+namespace {
+
+class NakedIfaceMgr: public IfaceMgr {
+ // "naked" Interface Manager, exposes internal fields
+public:
+ NakedIfaceMgr() { }
+ IfaceCollection & getIfacesLst() { return ifaces_; }
+};
+
+// dummy class for now, but this will be expanded when needed
+class IfaceMgrTest : public ::testing::Test {
+public:
+ // these are empty for now, but let's keep them around
+ IfaceMgrTest() {
+ }
+
+ ~IfaceMgrTest() {
+ }
+};
+
+// We need some known interface to work reliably. Loopback interface
+// is named lo on Linux and lo0 on BSD boxes. We need to find out
+// which is available. This is not a real test, but rather a workaround
+// that will go away when interface detection is implemented.
+
+// NOTE: At this stage of development, write access to current directory
+// during running tests is required.
+TEST_F(IfaceMgrTest, loDetect) {
+
+ // poor man's interface detection
+ // it will go away as soon as proper interface detection
+ // is implemented
+ if (if_nametoindex("lo") > 0) {
+ cout << "This is Linux, using lo as loopback." << endl;
+ snprintf(LOOPBACK, buf_size - 1, "lo");
+ } else if (if_nametoindex("lo0") > 0) {
+ cout << "This is BSD, using lo0 as loopback." << endl;
+ snprintf(LOOPBACK, buf_size - 1, "lo0");
+ } else {
+ cout << "Failed to detect loopback interface. Neither "
+ << "lo nor lo0 worked. I give up." << endl;
+ FAIL();
+ }
+}
+
+// uncomment this test to create packet writer. It will
+// write incoming DHCPv6 packets as C arrays. That is useful
+// for generating test sequences based on actual traffic
+//
+// TODO: this potentially should be moved to a separate tool
+//
+
+#if 0
+TEST_F(IfaceMgrTest, dhcp6Sniffer) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ unlink("interfaces.txt");
+
+ ofstream interfaces("interfaces.txt", ios::ate);
+ interfaces << "eth0 fe80::21e:8cff:fe9b:7349";
+ interfaces.close();
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ Pkt6* pkt = NULL;
+ int cnt = 0;
+ cout << "---8X-----------------------------------------" << endl;
+ while (true) {
+ pkt = ifacemgr->receive();
+
+ cout << "// this code is autogenerated. Do NOT edit." << endl;
+ cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
+ cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
+ cout << " Pkt6* pkt;" << endl;
+ cout << " pkt = new Pkt6(" << pkt->data_len_ << ");" << endl;
+ cout << " pkt->remote_port_ = " << pkt-> remote_port_ << ";" << endl;
+ cout << " pkt->remote_addr_ = IOAddress(\""
+ << pkt->remote_addr_.toText() << "\");" << endl;
+ cout << " pkt->local_port_ = " << pkt-> local_port_ << ";" << endl;
+ cout << " pkt->local_addr_ = IOAddress(\""
+ << pkt->local_addr_.toText() << "\");" << endl;
+ cout << " pkt->ifindex_ = " << pkt->ifindex_ << ";" << endl;
+ cout << " pkt->iface_ = \"" << pkt->iface_ << "\";" << endl;
+
+ // TODO: it would be better to declare a statically initialized array
+ // and then memcpy it to packet.
+ for (int i=0; i< pkt->data_len_; i++) {
+ cout << " pkt->data_[" << i << "]="
+ << (int)(unsigned char)pkt->data_[i] << "; ";
+ if (!(i%4))
+ cout << endl;
+ }
+ cout << endl;
+ cout << " return (pkt);" << endl;
+ cout << "}" << endl << endl;
+
+ delete pkt;
+ }
+ cout << "---8X-----------------------------------------" << endl;
+
+ // never happens. Infinite loop is infinite
+ delete pkt;
+ delete ifacemgr;
+}
+#endif
+
+TEST_F(IfaceMgrTest, basic) {
+ // checks that IfaceManager can be instantiated
+
+ IfaceMgr & ifacemgr = IfaceMgr::instance();
+ ASSERT_TRUE(&ifacemgr != 0);
+}
+
+TEST_F(IfaceMgrTest, ifaceClass) {
+ // basic tests for Iface inner class
+
+ IfaceMgr::Iface* iface = new IfaceMgr::Iface("eth5", 7);
+
+ EXPECT_STREQ("eth5/7", iface->getFullName().c_str());
+
+ delete iface;
+}
+
+// TODO: Implement getPlainMac() test as soon as interface detection
+// is implemented.
+TEST_F(IfaceMgrTest, getIface) {
+
+ cout << "Interface checks. Please ignore socket binding errors." << endl;
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // interface name, ifindex
+ IfaceMgr::Iface iface1("lo1", 100);
+ IfaceMgr::Iface iface2("eth9", 101);
+ IfaceMgr::Iface iface3("en3", 102);
+ IfaceMgr::Iface iface4("e1000g4", 103);
+ cout << "This test assumes that there are less than 100 network interfaces"
+ << " in the tested system and there are no lo1, eth9, en3, e1000g4"
+ << " or wifi15 interfaces present." << endl;
+
+ // note: real interfaces may be detected as well
+ ifacemgr->getIfacesLst().push_back(iface1);
+ ifacemgr->getIfacesLst().push_back(iface2);
+ ifacemgr->getIfacesLst().push_back(iface3);
+ ifacemgr->getIfacesLst().push_back(iface4);
+
+ cout << "There are " << ifacemgr->getIfacesLst().size()
+ << " interfaces." << endl;
+ for (IfaceMgr::IfaceCollection::iterator iface=ifacemgr->getIfacesLst().begin();
+ iface != ifacemgr->getIfacesLst().end();
+ ++iface) {
+ cout << " " << iface->getFullName() << endl;
+ }
+
+
+ // check that interface can be retrieved by ifindex
+ IfaceMgr::Iface* tmp = ifacemgr->getIface(102);
+ ASSERT_TRUE(tmp != NULL);
+
+ EXPECT_EQ("en3", tmp->getName());
+ EXPECT_EQ(102, tmp->getIndex());
+
+ // check that interface can be retrieved by name
+ tmp = ifacemgr->getIface("lo1");
+ ASSERT_TRUE(tmp != NULL);
+
+ EXPECT_EQ("lo1", tmp->getName());
+ EXPECT_EQ(100, tmp->getIndex());
+
+ // check that non-existing interfaces are not returned
+ EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi15") );
+
+ delete ifacemgr;
+
+}
+
+TEST_F(IfaceMgrTest, sockets6) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ IOAddress loAddr("::1");
+
+ Pkt6 pkt6(DHCPV6_SOLICIT, 123);
+ pkt6.setIface(LOOPBACK);
+
+ // bind multicast socket to port 10547
+ int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ EXPECT_GT(socket1, 0); // socket > 0
+
+ EXPECT_EQ(socket1, ifacemgr->getSocket(pkt6));
+
+ // bind unicast socket to port 10548
+ int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
+ EXPECT_GT(socket2, 0);
+
+ // removed code for binding socket twice to the same address/port
+ // as it caused problems on some platforms (e.g. Mac OS X)
+
+ close(socket1);
+ close(socket2);
+
+ delete ifacemgr;
+}
+
+// TODO: disabled because the loopback interface is named differently on
+// various systems (lo on Linux, lo0 on BSD systems)
+TEST_F(IfaceMgrTest, DISABLED_sockets6Mcast) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ IOAddress loAddr("::1");
+ IOAddress mcastAddr("ff02::1:2");
+
+ // bind multicast socket to port 10547
+ int socket1 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+ EXPECT_GT(socket1, 0); // socket > 0
+
+ // expect success. This address/port is already bound, but
+ // we are using SO_REUSEADDR, so we can bind it twice
+ int socket2 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+ EXPECT_GT(socket2, 0);
+
+ // There is no good way to test the negative case here: we would need a
+ // non-multicast interface. Once interface detection is implemented, we
+ // will be able to iterate through the available interfaces and check
+ // whether any of them lack the multicast-capable flag.
+
+ close(socket1);
+ close(socket2);
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, sendReceive6) {
+
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // let's assume that every supported OS has a loopback interface
+ IOAddress loAddr("::1");
+ int socket1 = 0, socket2 = 0;
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+ );
+
+ EXPECT_GT(socket1, 0);
+ EXPECT_GT(socket2, 0);
+
+
+ // prepare dummy payload
+ uint8_t data[128];
+ for (int i = 0; i < 128; i++) {
+ data[i] = i;
+ }
+ Pkt6Ptr sendPkt = Pkt6Ptr(new Pkt6(data, 128));
+
+ sendPkt->repack();
+
+ sendPkt->setRemotePort(10547);
+ sendPkt->setRemoteAddr(IOAddress("::1"));
+ sendPkt->setIndex(1);
+ sendPkt->setIface(LOOPBACK);
+
+ Pkt6Ptr rcvPkt;
+
+ EXPECT_EQ(true, ifacemgr->send(sendPkt));
+
+ rcvPkt = ifacemgr->receive6();
+
+ ASSERT_TRUE(rcvPkt); // received our own packet
+
+ // let's check that we received what was sent
+ ASSERT_EQ(sendPkt->getData().size(), rcvPkt->getData().size());
+ EXPECT_EQ(0, memcmp(&sendPkt->getData()[0], &rcvPkt->getData()[0],
+ rcvPkt->getData().size()));
+
+ EXPECT_EQ(sendPkt->getRemoteAddr().toText(), rcvPkt->getRemoteAddr().toText());
+
+ // since we opened 2 sockets on the same interface and none of them is multicast,
+ // none is preferred over the other for sending data, so we really should not
+ // assume one or the other will always be chosen for sending data. Therefore
+ // we should accept both values as source ports.
+ EXPECT_TRUE((rcvPkt->getRemotePort() == 10546) || (rcvPkt->getRemotePort() == 10547));
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, sendReceive4) {
+
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // let's assume that every supported OS has a loopback interface
+ IOAddress loAddr("127.0.0.1");
+ int socket1 = 0, socket2 = 0;
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000);
+ socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000 + 1);
+ );
+
+ EXPECT_GE(socket1, 0);
+ EXPECT_GE(socket2, 0);
+
+ boost::shared_ptr<Pkt4> sendPkt(new Pkt4(DHCPDISCOVER, 1234) );
+
+ sendPkt->setLocalAddr(IOAddress("127.0.0.1"));
+
+ sendPkt->setLocalPort(DHCP4_SERVER_PORT + 10000 + 1);
+ sendPkt->setRemotePort(DHCP4_SERVER_PORT + 10000);
+ sendPkt->setRemoteAddr(IOAddress("127.0.0.1"));
+ sendPkt->setIndex(1);
+ sendPkt->setIface(string(LOOPBACK));
+ sendPkt->setHops(6);
+ sendPkt->setSecs(42);
+ sendPkt->setCiaddr(IOAddress("192.0.2.1"));
+ sendPkt->setSiaddr(IOAddress("192.0.2.2"));
+ sendPkt->setYiaddr(IOAddress("192.0.2.3"));
+ sendPkt->setGiaddr(IOAddress("192.0.2.4"));
+
+ // unpack() now checks if mandatory DHCP_MESSAGE_TYPE is present
+ boost::shared_ptr<Option> msgType(new Option(Option::V4,
+ static_cast<uint16_t>(DHO_DHCP_MESSAGE_TYPE)));
+ msgType->setUint8(static_cast<uint8_t>(DHCPDISCOVER));
+ sendPkt->addOption(msgType);
+
+ uint8_t sname[] = "That's just a string that will act as SNAME";
+ sendPkt->setSname(sname, strlen((const char*)sname));
+ uint8_t file[] = "/another/string/that/acts/as/a/file_name.txt";
+ sendPkt->setFile(file, strlen((const char*)file));
+
+ ASSERT_NO_THROW(
+ sendPkt->pack();
+ );
+
+ boost::shared_ptr<Pkt4> rcvPkt;
+
+ EXPECT_EQ(true, ifacemgr->send(sendPkt));
+
+ rcvPkt = ifacemgr->receive4(10);
+
+ ASSERT_TRUE(rcvPkt); // received our own packet
+
+ ASSERT_NO_THROW(
+ rcvPkt->unpack();
+ );
+
+ // let's check that we received what was sent
+ EXPECT_EQ(sendPkt->len(), rcvPkt->len());
+
+ EXPECT_EQ("127.0.0.1", rcvPkt->getRemoteAddr().toText());
+ EXPECT_EQ(sendPkt->getRemotePort(), rcvPkt->getLocalPort());
+
+ // now let's check content
+ EXPECT_EQ(sendPkt->getHops(), rcvPkt->getHops());
+ EXPECT_EQ(sendPkt->getOp(), rcvPkt->getOp());
+ EXPECT_EQ(sendPkt->getSecs(), rcvPkt->getSecs());
+ EXPECT_EQ(sendPkt->getFlags(), rcvPkt->getFlags());
+ EXPECT_EQ(sendPkt->getCiaddr(), rcvPkt->getCiaddr());
+ EXPECT_EQ(sendPkt->getSiaddr(), rcvPkt->getSiaddr());
+ EXPECT_EQ(sendPkt->getYiaddr(), rcvPkt->getYiaddr());
+ EXPECT_EQ(sendPkt->getGiaddr(), rcvPkt->getGiaddr());
+ EXPECT_EQ(sendPkt->getTransid(), rcvPkt->getTransid());
+ EXPECT_EQ(sendPkt->getType(), rcvPkt->getType());
+ EXPECT_TRUE(sendPkt->getSname() == rcvPkt->getSname());
+ EXPECT_TRUE(sendPkt->getFile() == rcvPkt->getFile());
+ EXPECT_EQ(sendPkt->getHtype(), rcvPkt->getHtype());
+ EXPECT_EQ(sendPkt->getHlen(), rcvPkt->getHlen());
+
+ // since we opened 2 sockets on the same interface and none of them is multicast,
+ // none is preferred over the other for sending data, so we really should not
+ // assume one or the other will always be chosen for sending data. We
+ // therefore skip checking the source port of the sent packet.
+
+ delete ifacemgr;
+}
+
+
+TEST_F(IfaceMgrTest, socket4) {
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // Let's assume that every supported OS has a loopback interface.
+ IOAddress loAddr("127.0.0.1");
+ // Use unprivileged port (it's convenient for running tests as non-root).
+ int socket1 = 0;
+
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000);
+ );
+
+ EXPECT_GT(socket1, 0);
+
+ Pkt4 pkt(DHCPDISCOVER, 1234);
+ pkt.setIface(LOOPBACK);
+
+ // Expect that we get the socket that we just opened.
+ EXPECT_EQ(socket1, ifacemgr->getSocket(pkt));
+
+ close(socket1);
+
+ delete ifacemgr;
+}
+
+// Test the Iface structure itself
+TEST_F(IfaceMgrTest, iface) {
+ IfaceMgr::Iface* iface = NULL;
+ EXPECT_NO_THROW(
+ iface = new IfaceMgr::Iface("eth0",1);
+ );
+
+ EXPECT_EQ("eth0", iface->getName());
+ EXPECT_EQ(1, iface->getIndex());
+ EXPECT_EQ("eth0/1", iface->getFullName());
+
+ // Let's make a copy of this address collection.
+ IfaceMgr::AddressCollection addrs = iface->getAddresses();
+
+ EXPECT_EQ(0, addrs.size());
+
+ IOAddress addr1("192.0.2.6");
+ iface->addAddress(addr1);
+
+ addrs = iface->getAddresses();
+ ASSERT_EQ(1, addrs.size());
+ EXPECT_EQ("192.0.2.6", addrs.at(0).toText());
+
+ // No such address, should return false.
+ EXPECT_FALSE(iface->delAddress(IOAddress("192.0.8.9")));
+
+ // This address is present, delete it!
+ EXPECT_TRUE(iface->delAddress(IOAddress("192.0.2.6")));
+
+ // Not really necessary, previous reference still points to the same
+ // collection. Let's do it anyway, as test code may serve as example
+ // usage code as well.
+ addrs = iface->getAddresses();
+
+ EXPECT_EQ(0, addrs.size());
+
+ EXPECT_NO_THROW(
+ delete iface;
+ );
+}
+
+TEST_F(IfaceMgrTest, iface_methods) {
+ IfaceMgr::Iface iface("foo", 1234);
+
+ iface.setHWType(42);
+ EXPECT_EQ(42, iface.getHWType());
+
+ uint8_t mac[IfaceMgr::MAX_MAC_LEN+10];
+ for (int i = 0; i < IfaceMgr::MAX_MAC_LEN + 10; i++)
+ mac[i] = 255 - i;
+
+ EXPECT_EQ("foo", iface.getName());
+ EXPECT_EQ(1234, iface.getIndex());
+
+ // MAC is too long. Exception should be thrown and
+ // MAC length should not be set.
+ EXPECT_THROW(
+ iface.setMac(mac, IfaceMgr::MAX_MAC_LEN + 1),
+ OutOfRange
+ );
+
+ // MAC length should remain unset, as the exception was thrown.
+ EXPECT_EQ(0, iface.getMacLen());
+
+ // Setting maximum length MAC should be ok.
+ iface.setMac(mac, IfaceMgr::MAX_MAC_LEN);
+
+ // The constant cannot be used directly in EXPECT_EQ, as this produces a
+ // linking error: the macro takes its arguments by const reference, which
+ // requires an out-of-class definition of the static constant.
+ size_t len = IfaceMgr::MAX_MAC_LEN;
+ EXPECT_EQ(len, iface.getMacLen());
+ EXPECT_EQ(0, memcmp(mac, iface.getMac(), iface.getMacLen()));
+}
+
+TEST_F(IfaceMgrTest, socketInfo) {
+
+ // check that socketinfo for IPv4 socket is functional
+ IfaceMgr::SocketInfo sock1(7, IOAddress("192.0.2.56"), DHCP4_SERVER_PORT + 7);
+ EXPECT_EQ(7, sock1.sockfd_);
+ EXPECT_EQ("192.0.2.56", sock1.addr_.toText());
+ EXPECT_EQ(AF_INET, sock1.family_);
+ EXPECT_EQ(DHCP4_SERVER_PORT + 7, sock1.port_);
+
+ // check that socketinfo for IPv6 socket is functional
+ IfaceMgr::SocketInfo sock2(9, IOAddress("2001:db8:1::56"), DHCP4_SERVER_PORT + 9);
+ EXPECT_EQ(9, sock2.sockfd_);
+ EXPECT_EQ("2001:db8:1::56", sock2.addr_.toText());
+ EXPECT_EQ(AF_INET6, sock2.family_);
+ EXPECT_EQ(DHCP4_SERVER_PORT + 9, sock2.port_);
+
+ // now let's test if IfaceMgr handles socket info properly
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+ IfaceMgr::Iface* loopback = ifacemgr->getIface(LOOPBACK);
+ ASSERT_TRUE(loopback);
+ loopback->addSocket(sock1);
+ loopback->addSocket(sock2);
+
+ Pkt6 pkt6(DHCPV6_REPLY, 123456);
+
+ // pkt6 does not have the interface set yet
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ BadValue
+ );
+
+ // try to send over non-existing interface
+ pkt6.setIface("nosuchinterface45");
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ BadValue
+ );
+
+ // this will work
+ pkt6.setIface(LOOPBACK);
+ EXPECT_EQ(9, ifacemgr->getSocket(pkt6));
+
+ bool deleted = false;
+ EXPECT_NO_THROW(
+ deleted = ifacemgr->getIface(LOOPBACK)->delSocket(9);
+ );
+ EXPECT_EQ(true, deleted);
+
+ // it should throw again, there's no usable socket anymore
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ Unexpected
+ );
+
+ // repeat for pkt4
+ Pkt4 pkt4(DHCPDISCOVER, 1);
+
+ // pkt4 does not have interface set yet.
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ BadValue
+ );
+
+ // Try to send over non-existing interface.
+ pkt4.setIface("nosuchinterface45");
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ BadValue
+ );
+
+ // Socket info is set, packet has well defined interface. It should work.
+ pkt4.setIface(LOOPBACK);
+ EXPECT_EQ(7, ifacemgr->getSocket(pkt4));
+
+ EXPECT_NO_THROW(
+ ifacemgr->getIface(LOOPBACK)->delSocket(7);
+ );
+
+ // It should throw again, there's no usable socket anymore.
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ Unexpected
+ );
+
+ delete ifacemgr;
+}
+
+#if defined(OS_LINUX)
+
+/// @brief parses text representation of MAC address
+///
+/// This function parses text representation of a MAC address and stores
+/// it in binary format. The text format is expected to be colon-separated,
+/// e.g. f4:6d:04:96:58:f2
+///
+/// TODO: IfaceMgr::Iface::mac_ uses uint8_t* type, should be vector<uint8_t>
+///
+/// @param textMac string with MAC address to parse
+/// @param mac pointer to output buffer
+/// @param macLen length of output buffer
+///
+/// @return number of bytes filled in output buffer
+size_t parse_mac(const std::string& textMac, uint8_t* mac, size_t macLen) {
+ stringstream tmp(textMac);
+ tmp.flags(ios::hex);
+ int i = 0;
+ uint8_t octet = 0; // output octet
+ uint8_t byte; // parsed character from the text representation
+
+ // read one character at a time; stopping on a failed extraction avoids
+ // processing the final character twice at the end of input
+ while (tmp >> byte) {
+ if (byte == ':') {
+ mac[i++] = octet;
+
+ if (i == macLen) {
+ // parsing aborted. We hit output buffer size
+ return(i);
+ }
+ octet = 0;
+ continue;
+ }
+ if (isalpha(byte)) {
+ byte = toupper(byte) - 'A' + 10;
+ } else if (isdigit(byte)) {
+ byte -= '0';
+ } else {
+ // parse error. Let's return what we were able to parse so far
+ break;
+ }
+ octet <<= 4;
+ octet += byte;
+ }
+ mac[i++] = octet;
+
+ return (i);
+}
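+
+// Usage sketch (illustrative only; the MAC address used here is an arbitrary
+// example, not data the tests depend on):
+//
+//     uint8_t buf[IfaceMgr::MAX_MAC_LEN];
+//     size_t len = parse_mac("f4:6d:04:96:58:f2", buf, sizeof(buf));
+//     // len == 6, buf == { 0xf4, 0x6d, 0x04, 0x96, 0x58, 0xf2 }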
+
+/// @brief Parses 'ifconfig -a' output and creates list of interfaces
+///
+/// This method tries to parse ifconfig output. Note that there are some
+/// oddities in recent versions of ifconfig, like putting extra spaces
+/// after MAC address, inconsistent naming and spacing between inet and inet6.
+/// This is an attempt to find a balance between tight parsing of every piece
+/// of text that ifconfig prints and robustness to handle slight differences
+/// in ifconfig output.
+///
+/// @todo: Consider using isc::util::str::tokens here.
+///
+/// @param textFile name of a text file that holds output of ifconfig -a
+/// @param ifaces empty list of interfaces to be filled
+void parse_ifconfig(const std::string& textFile, IfaceMgr::IfaceCollection& ifaces) {
+ fstream f(textFile.c_str());
+
+ bool first_line = true;
+ IfaceMgr::IfaceCollection::iterator iface;
+ while (!f.eof()) {
+ string line;
+ getline(f, line);
+
+ // interfaces are separated by empty line
+ if (line.length() == 0) {
+ first_line = true;
+ continue;
+ }
+
+ // uncomment this for ifconfig output debug
+ // cout << "line[" << line << "]" << endl;
+
+ // this is first line of a new interface
+ if (first_line) {
+ first_line = false;
+
+ size_t offset;
+ offset = line.find_first_of(" ");
+ if (offset == string::npos) {
+ isc_throw(BadValue, "Malformed output of ifconfig");
+ }
+
+ // ifconfig in Gentoo prints out eth0: instead of eth0
+ if (line[offset - 1] == ':') {
+ offset--;
+ }
+ string name = line.substr(0, offset);
+
+ // sadly, ifconfig does not return ifindex
+ ifaces.push_back(IfaceMgr::Iface(name, 0));
+ iface = ifaces.end();
+ --iface; // points to the last element
+
+ offset = line.find(string("HWaddr"));
+
+ string mac = "";
+ if (offset != string::npos) { // some interfaces don't have MAC (e.g. lo)
+ offset += 7;
+ mac = line.substr(offset, string::npos);
+ mac = mac.substr(0, mac.find_first_of(" "));
+
+ uint8_t buf[IfaceMgr::MAX_MAC_LEN];
+ int mac_len = parse_mac(mac, buf, IfaceMgr::MAX_MAC_LEN);
+ iface->setMac(buf, mac_len);
+ }
+ }
+
+ if (line.find("inet6") != string::npos) {
+ // IPv6 address
+ string addr;
+ if (line.find("addr:", line.find("inet6")) != string::npos) {
+ // Ubuntu style format: inet6 addr: ::1/128 Scope:Host
+ addr = line.substr(line.find("addr:") + 6, string::npos);
+ } else {
+ // Gentoo style format: inet6 fe80::6ef0:49ff:fe96:ba17 prefixlen 64 scopeid 0x20<link>
+ addr = line.substr(line.find("inet6") + 6, string::npos);
+ }
+
+ // handle Ubuntu format: inet6 addr: fe80::f66d:4ff:fe96:58f2/64 Scope:Link
+ addr = addr.substr(0, addr.find("/"));
+
+ // handle inet6 fe80::ca3a:35ff:fed4:8f1d prefixlen 64 scopeid 0x20<link>
+ addr = addr.substr(0, addr.find(" "));
+ IOAddress a(addr);
+ iface->addAddress(a);
+ } else if(line.find("inet") != string::npos) {
+ // IPv4 address
+ string addr;
+ if (line.find("addr:", line.find("inet")) != string::npos) {
+ // Ubuntu style format: inet addr:127.0.0.1 Mask:255.0.0.0
+ addr = line.substr(line.find("addr:") + 5, string::npos);
+ } else {
+ // Gentoo style format: inet 10.53.0.4 netmask 255.255.255.0
+ addr = line.substr(line.find("inet") + 5, string::npos);
+ }
+
+ addr = addr.substr(0, addr.find_first_of(" "));
+ IOAddress a(addr);
+ iface->addAddress(a);
+ } else if (line.find("Metric") != string::npos) {
+ // flags
+ if (line.find("UP") != string::npos) {
+ iface->flag_up_ = true;
+ }
+ if (line.find("LOOPBACK") != string::npos) {
+ iface->flag_loopback_ = true;
+ }
+ if (line.find("RUNNING") != string::npos) {
+ iface->flag_running_ = true;
+ }
+ if (line.find("BROADCAST") != string::npos) {
+ iface->flag_broadcast_ = true;
+ }
+ if (line.find("MULTICAST") != string::npos) {
+ iface->flag_multicast_ = true;
+ }
+ }
+ }
+}
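+
+// Illustrative input (not part of the test run): both of the following
+// "ifconfig -a" styles are handled, and each yields an Iface named "eth0"
+// with address 10.53.0.4 (the first form also provides the MAC address):
+//
+//     eth0      Link encap:Ethernet  HWaddr f4:6d:04:96:58:f2
+//               inet addr:10.53.0.4  Bcast:10.53.0.255  Mask:255.255.255.0
+//
+//     eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
+//           inet 10.53.0.4  netmask 255.255.255.0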
+
+
+// This test compares the implemented detection routines to the output of the "ifconfig -a" command.
+// It is far from perfect, but it is able to verify that interface names, flags,
+// MAC addresses, and IPv4 and IPv6 addresses are detected properly. Interface list
+// completeness (i.e. that each interface is reported, with none missing or extra) and
+// address completeness are verified.
+//
+// Things that are not tested:
+// - ifindex (ifconfig does not print it out)
+// - address scopes and lifetimes (we don't need it, so it is not implemented in IfaceMgr)
+// TODO: temporarily disabled, see ticket #1529
+TEST_F(IfaceMgrTest, DISABLED_detectIfaces_linux) {
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+ IfaceMgr::IfaceCollection& detectedIfaces = ifacemgr->getIfacesLst();
+
+ const std::string textFile = "ifconfig.txt";
+
+ unlink(textFile.c_str());
+ int result = system( ("/sbin/ifconfig -a > " + textFile).c_str());
+
+ ASSERT_EQ(0, result);
+
+ // list of interfaces parsed from ifconfig
+ IfaceMgr::IfaceCollection parsedIfaces;
+
+ ASSERT_NO_THROW(
+ parse_ifconfig(textFile, parsedIfaces);
+ );
+ unlink(textFile.c_str());
+
+ cout << "------Parsed interfaces---" << endl;
+ for (IfaceMgr::IfaceCollection::iterator i = parsedIfaces.begin();
+ i != parsedIfaces.end(); ++i) {
+ cout << i->getName() << ": ifindex=" << i->getIndex() << ", mac=" << i->getPlainMac();
+ cout << ", flags:";
+ if (i->flag_up_) {
+ cout << " UP";
+ }
+ if (i->flag_running_) {
+ cout << " RUNNING";
+ }
+ if (i->flag_multicast_) {
+ cout << " MULTICAST";
+ }
+ if (i->flag_broadcast_) {
+ cout << " BROADCAST";
+ }
+ cout << ", addrs:";
+ const IfaceMgr::AddressCollection& addrs = i->getAddresses();
+ for (IfaceMgr::AddressCollection::const_iterator a= addrs.begin();
+ a != addrs.end(); ++a) {
+ cout << a->toText() << " ";
+ }
+ cout << endl;
+ }
+
+ // Ok, now we have 2 lists of interfaces. Need to compare them
+ ASSERT_EQ(detectedIfaces.size(), parsedIfaces.size());
+
+ // TODO: This could probably be written more simply with find().
+ for (IfaceMgr::IfaceCollection::iterator detected = detectedIfaces.begin();
+ detected != detectedIfaces.end(); ++detected) {
+ // let's find out if this interface is also present in the list parsed from ifconfig
+
+ bool found = false;
+ for (IfaceMgr::IfaceCollection::iterator i = parsedIfaces.begin();
+ i != parsedIfaces.end(); ++i) {
+ if (detected->getName() != i->getName()) {
+ continue;
+ }
+ found = true;
+
+ cout << "Checking interface " << detected->getName() << endl;
+
+ // start with checking flags
+ EXPECT_EQ(detected->flag_loopback_, i->flag_loopback_);
+ EXPECT_EQ(detected->flag_up_, i->flag_up_);
+ EXPECT_EQ(detected->flag_running_, i->flag_running_);
+ EXPECT_EQ(detected->flag_multicast_, i->flag_multicast_);
+ EXPECT_EQ(detected->flag_broadcast_, i->flag_broadcast_);
+
+ // skip MAC comparison for loopback as netlink returns MAC
+ // 00:00:00:00:00:00 for lo
+ if (!detected->flag_loopback_) {
+ ASSERT_EQ(detected->getMacLen(), i->getMacLen());
+ EXPECT_EQ(0, memcmp(detected->getMac(), i->getMac(), i->getMacLen()));
+ }
+
+ EXPECT_EQ(detected->getAddresses().size(), i->getAddresses().size());
+
+ // now compare addresses
+ const IfaceMgr::AddressCollection& addrs = detected->getAddresses();
+ for (IfaceMgr::AddressCollection::const_iterator addr = addrs.begin();
+ addr != addrs.end(); ++addr) {
+ bool addr_found = false;
+
+ // compare against the address list parsed from ifconfig
+ const IfaceMgr::AddressCollection& addrs2 = i->getAddresses();
+ for (IfaceMgr::AddressCollection::const_iterator a = addrs2.begin();
+ a != addrs2.end(); ++a) {
+ if (*addr != *a) {
+ continue;
+ }
+ addr_found = true;
+ }
+ if (!addr_found) {
+ cout << "ifconfig does not seem to report " << addr->toText()
+ << " address on " << detected->getFullName() << " interface." << endl;
+ FAIL();
+ }
+ cout << "Address " << addr->toText() << " on iterface " << detected->getFullName()
+ << " matched with 'ifconfig -a' output." << endl;
+ }
+ }
+ if (!found) { // corresponding interface was not found
+ FAIL();
+ }
+ }
+
+ delete ifacemgr;
+}
+#endif
+
+volatile bool callback_ok;
+
+void my_callback(void) {
+ cout << "Callback triggered." << endl;
+ callback_ok = true;
+}
+
+TEST_F(IfaceMgrTest, controlSession) {
+ // Tests whether an extra control socket and its callback can be registered
+ // and are handled properly by the receive4() method.
+
+ callback_ok = false;
+
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // create pipe and register it as extra socket
+ int pipefd[2];
+ EXPECT_TRUE(pipe(pipefd) == 0);
+ EXPECT_NO_THROW(ifacemgr->set_session_socket(pipefd[0], my_callback));
+
+ Pkt4Ptr pkt4;
+ pkt4 = ifacemgr->receive4(1);
+
+ // Our callback should not be called this time (there was no data)
+ EXPECT_FALSE(callback_ok);
+
+ // IfaceMgr should not process control socket data as incoming packets
+ EXPECT_FALSE(pkt4);
+
+ // Now, send some data over pipe (38 bytes)
+ EXPECT_EQ(38, write(pipefd[1], "Hi, this is a message sent over a pipe", 38));
+
+ // ... and repeat
+ pkt4 = ifacemgr->receive4(1);
+
+ // IfaceMgr should not process control socket data as incoming packets
+ EXPECT_FALSE(pkt4);
+
+ // There was some data, so this time callback should be called
+ EXPECT_TRUE(callback_ok);
+
+ delete ifacemgr;
+
+ // close both pipe ends
+ close(pipefd[1]);
+ close(pipefd[0]);
+}
+
+}
diff --git a/src/lib/dhcp/tests/libdhcp++_unittest.cc b/src/lib/dhcp/tests/libdhcp++_unittest.cc
new file mode 100644
index 0000000..86f019e
--- /dev/null
+++ b/src/lib/dhcp/tests/libdhcp++_unittest.cc
@@ -0,0 +1,230 @@
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#else
+#include <arpa/inet.h>
+#endif
+#include <gtest/gtest.h>
+#include <util/buffer.h>
+#include <dhcp/libdhcp++.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+namespace {
+class LibDhcpTest : public ::testing::Test {
+public:
+ LibDhcpTest() {
+ }
+};
+
+static const uint8_t packed[] = {
+ 0, 12, 0, 5, 100, 101, 102, 103, 104, // opt1 (9 bytes)
+ 0, 13, 0, 3, 105, 106, 107, // opt2 (7 bytes)
+ 0, 14, 0, 2, 108, 109, // opt3 (6 bytes)
+ 1, 0, 0, 4, 110, 111, 112, 113, // opt4 (8 bytes)
+ 1, 1, 0, 1, 114 // opt5 (5 bytes)
+};
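+
+// Each DHCPv6 option above follows the RFC 3315 layout: a 2-byte option
+// code, a 2-byte option length and 'length' bytes of data. For example, the
+// first 9 bytes encode option 12 with a 5-byte payload (100..104); codes
+// 256 and 257 exercise values that do not fit in a single byte.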
+
+TEST(LibDhcpTest, packOptions6) {
+ OptionBuffer buf(512);
+ isc::dhcp::Option::OptionCollection opts; // list of options
+
+ // generate content for options
+ for (int i = 0; i < 64; i++) {
+ buf[i]=i+100;
+ }
+
+ OptionPtr opt1(new Option(Option::V6, 12, buf.begin() + 0, buf.begin() + 5));
+ OptionPtr opt2(new Option(Option::V6, 13, buf.begin() + 5, buf.begin() + 8));
+ OptionPtr opt3(new Option(Option::V6, 14, buf.begin() + 8, buf.begin() + 10));
+ OptionPtr opt4(new Option(Option::V6,256, buf.begin() + 10,buf.begin() + 14));
+ OptionPtr opt5(new Option(Option::V6,257, buf.begin() + 14,buf.begin() + 15));
+
+ opts.insert(pair<int, OptionPtr >(opt1->getType(), opt1));
+ opts.insert(pair<int, OptionPtr >(opt1->getType(), opt2));
+ opts.insert(pair<int, OptionPtr >(opt1->getType(), opt3));
+ opts.insert(pair<int, OptionPtr >(opt1->getType(), opt4));
+ opts.insert(pair<int, OptionPtr >(opt1->getType(), opt5));
+
+ OutputBuffer assembled(512);
+
+ EXPECT_NO_THROW(LibDHCP::packOptions6(assembled, opts));
+ EXPECT_EQ(35, assembled.getLength()); // options should take 35 bytes
+ EXPECT_EQ(0, memcmp(assembled.getData(), packed, 35) );
+}
+
+TEST(LibDhcpTest, unpackOptions6) {
+
+ // just a couple of random options
+ // Option is used as a simple option implementation
+ // More advanced uses are validated in tests dedicated for
+ // specific derived classes.
+ isc::dhcp::Option::OptionCollection options; // list of options
+
+ OptionBuffer buf(512);
+ memcpy(&buf[0], packed, 35);
+
+ EXPECT_NO_THROW ({
+ LibDHCP::unpackOptions6(OptionBuffer(buf.begin(), buf.begin()+35), options);
+ });
+
+ EXPECT_EQ(options.size(), 5); // there should be 5 options
+
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+ ASSERT_FALSE(x == options.end()); // option 12 should exist
+ EXPECT_EQ(12, x->second->getType()); // this should be option 12
+ ASSERT_EQ(9, x->second->len()); // it should be of length 9
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+4, 5)); // data len=5
+
+ x = options.find(13);
+ ASSERT_FALSE(x == options.end()); // option 13 should exist
+ EXPECT_EQ(13, x->second->getType()); // this should be option 13
+ ASSERT_EQ(7, x->second->len()); // it should be of length 7
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+13, 3)); // data len=3
+
+ x = options.find(14);
+ ASSERT_FALSE(x == options.end()); // option 14 should exist
+ EXPECT_EQ(14, x->second->getType()); // this should be option 14
+ ASSERT_EQ(6, x->second->len()); // it should be of length 6
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+20, 2)); // data len=2
+
+ x = options.find(256);
+ ASSERT_FALSE(x == options.end()); // option 256 should exist
+ EXPECT_EQ(256, x->second->getType()); // this should be option 256
+ ASSERT_EQ(8, x->second->len()); // it should be of length 8
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+26, 4)); // data len=4
+
+ x = options.find(257);
+ ASSERT_FALSE(x == options.end()); // option 257 should exist
+ EXPECT_EQ(257, x->second->getType()); // this should be option 257
+ ASSERT_EQ(5, x->second->len()); // it should be of length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+34, 1)); // data len=1
+
+ x = options.find(0);
+ EXPECT_TRUE(x == options.end()); // option 0 not found
+
+ x = options.find(1); // 1 is htons(256) on little endians. Worth checking
+ EXPECT_TRUE(x == options.end()); // option 1 not found
+
+ x = options.find(2);
+ EXPECT_TRUE(x == options.end()); // option 2 not found
+
+ x = options.find(32000);
+ EXPECT_TRUE(x == options.end()); // option 32000 not found
+}
+
+
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 254, 3, 30, 31, 32,
+ 128, 3, 40, 41, 42
+};
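+
+// DHCPv4 options use the RFC 2132 layout: a 1-byte option code, a 1-byte
+// length and 'length' bytes of data, so each entry above occupies 5 bytes
+// on the wire (e.g. code 12, length 3, data 0, 1, 2).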
+
+TEST(LibDhcpTest, packOptions4) {
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].resize(3);
+ payload[i][0] = i*10;
+ payload[i][1] = i*10+1;
+ payload[i][2] = i*10+2;
+ }
+
+ OptionPtr opt1(new Option(Option::V4, 12, payload[0]));
+ OptionPtr opt2(new Option(Option::V4, 13, payload[1]));
+ OptionPtr opt3(new Option(Option::V4, 14, payload[2]));
+ OptionPtr opt4(new Option(Option::V4,254, payload[3]));
+ OptionPtr opt5(new Option(Option::V4,128, payload[4]));
+
+ isc::dhcp::Option::OptionCollection opts; // list of options
+ opts.insert(make_pair(opt1->getType(), opt1));
+ opts.insert(make_pair(opt1->getType(), opt2));
+ opts.insert(make_pair(opt1->getType(), opt3));
+ opts.insert(make_pair(opt1->getType(), opt4));
+ opts.insert(make_pair(opt1->getType(), opt5));
+
+ vector<uint8_t> expVect(v4Opts, v4Opts + sizeof(v4Opts));
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(LibDHCP::packOptions(buf, opts));
+ ASSERT_EQ(buf.getLength(), sizeof(v4Opts));
+ EXPECT_EQ(0, memcmp(v4Opts, buf.getData(), sizeof(v4Opts)));
+
+}
+
+TEST(LibDhcpTest, unpackOptions4) {
+
+ vector<uint8_t> packed(v4Opts, v4Opts + sizeof(v4Opts));
+ isc::dhcp::Option::OptionCollection options; // list of options
+
+ ASSERT_NO_THROW(
+ LibDHCP::unpackOptions4(packed, options);
+ );
+
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+ ASSERT_FALSE(x == options.end()); // option 12 should exist
+ EXPECT_EQ(12, x->second->getType()); // this should be option 12
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = options.find(13);
+ ASSERT_FALSE(x == options.end()); // option 13 should exist
+ EXPECT_EQ(13, x->second->getType()); // this should be option 13
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = options.find(14);
+ ASSERT_FALSE(x == options.end()); // option 14 should exist
+ EXPECT_EQ(14, x->second->getType()); // this should be option 14
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = options.find(254);
+ ASSERT_FALSE(x == options.end()); // option 254 should exist
+ EXPECT_EQ(254, x->second->getType()); // this should be option 254
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = options.find(128);
+ ASSERT_FALSE(x == options.end()); // option 128 should exist
+ EXPECT_EQ(128, x->second->getType()); // this should be option 128
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+22, 3)); // data len=3
+
+ x = options.find(0);
+ EXPECT_TRUE(x == options.end()); // option 0 not found
+
+ x = options.find(1);
+ EXPECT_TRUE(x == options.end()); // option 1 not found
+
+ x = options.find(2);
+ EXPECT_TRUE(x == options.end()); // option 2 not found
+}
+
+}
diff --git a/src/lib/dhcp/tests/libdhcp_unittest.cc b/src/lib/dhcp/tests/libdhcp_unittest.cc
deleted file mode 100644
index bfd0f6e..0000000
--- a/src/lib/dhcp/tests/libdhcp_unittest.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <config.h>
-#include <iostream>
-#include <sstream>
-
-#ifdef _WIN32
-#include <ws2tcpip.h>
-#else
-#include <arpa/inet.h>
-#endif
-#include <gtest/gtest.h>
-
-#include "dhcp/libdhcp.h"
-#include "config.h"
-
-using namespace std;
-using namespace isc;
-using namespace isc::dhcp;
-
-namespace {
-class LibDhcpTest : public ::testing::Test {
-public:
- LibDhcpTest() {
- }
-};
-
-static const uint8_t packed[] = {
- 0, 12, 0, 5, 100, 101, 102, 103, 104, // opt1 (9 bytes)
- 0, 13, 0, 3, 105, 106, 107, // opt2 (7 bytes)
- 0, 14, 0, 2, 108, 109, // opt3 (6 bytes)
- 1, 0, 0, 4, 110, 111, 112, 113, // opt4 (8 bytes)
- 1, 1, 0, 1, 114 // opt5 (5 bytes)
-};
-
-TEST_F(LibDhcpTest, packOptions6) {
- boost::shared_array<uint8_t> buf(new uint8_t[512]);
- isc::dhcp::Option::Option6Collection opts; // list of options
-
- // generate content for options
- for (int i = 0; i < 64; i++) {
- buf[i]=i+100;
- }
-
- boost::shared_ptr<Option> opt1(new Option(Option::V6, 12, buf, 0, 5));
- boost::shared_ptr<Option> opt2(new Option(Option::V6, 13, buf, 5, 3));
- boost::shared_ptr<Option> opt3(new Option(Option::V6, 14, buf, 8, 2));
- boost::shared_ptr<Option> opt4(new Option(Option::V6,256, buf,10, 4));
- boost::shared_ptr<Option> opt5(new Option(Option::V6,257, buf,14, 1));
-
- opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
- opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
- opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
- opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
- opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
-
- unsigned int offset;
- EXPECT_NO_THROW ({
- offset = LibDHCP::packOptions6(buf, 512, 100, opts);
- });
- EXPECT_EQ(135, offset); // options should take 35 bytes
- EXPECT_EQ(0, memcmp(&buf[100], packed, 35) );
-}
-
-TEST_F(LibDhcpTest, unpackOptions6) {
-
- // just couple of random options
- // Option is used as a simple option implementation
- // More advanced uses are validated in tests dedicated for
- // specific derived classes.
- isc::dhcp::Option::Option6Collection options; // list of options
-
- // we can't use packed directly, as shared_array would try to
- // free it eventually
- boost::shared_array<uint8_t> buf(new uint8_t[512]);
- memcpy(&buf[0], packed, 35);
-
- unsigned int offset;
- EXPECT_NO_THROW ({
- offset = LibDHCP::unpackOptions6(buf, 512, 0, 35, options);
- });
-
- EXPECT_EQ(35, offset); // parsed first 35 bytes (offset 0..34)
- EXPECT_EQ(options.size(), 5); // there should be 5 options
-
- isc::dhcp::Option::Option6Collection::const_iterator x = options.find(12);
- ASSERT_FALSE(x == options.end()); // option 1 should exist
- EXPECT_EQ(12, x->second->getType()); // this should be option 12
- ASSERT_EQ(9, x->second->len()); // it should be of length 9
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+4, 5)); // data len=5
-
- x = options.find(13);
- ASSERT_FALSE(x == options.end()); // option 13 should exist
- EXPECT_EQ(13, x->second->getType()); // this should be option 13
- ASSERT_EQ(7, x->second->len()); // it should be of length 7
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+13, 3)); // data len=3
-
- x = options.find(14);
- ASSERT_FALSE(x == options.end()); // option 3 should exist
- EXPECT_EQ(14, x->second->getType()); // this should be option 14
- ASSERT_EQ(6, x->second->len()); // it should be of length 6
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+20, 2)); // data len=2
-
- x = options.find(256);
- ASSERT_FALSE(x == options.end()); // option 256 should exist
- EXPECT_EQ(256, x->second->getType()); // this should be option 256
- ASSERT_EQ(8, x->second->len()); // it should be of length 7
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+26, 4)); // data len=4
-
- x = options.find(257);
- ASSERT_FALSE(x == options.end()); // option 257 should exist
- EXPECT_EQ(257, x->second->getType()); // this should be option 257
- ASSERT_EQ(5, x->second->len()); // it should be of length 5
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+34, 1)); // data len=1
-
- x = options.find(0);
- EXPECT_TRUE(x == options.end()); // option 0 not found
-
- x = options.find(1); // 1 is htons(256) on little endians. Worth checking
- EXPECT_TRUE(x == options.end()); // option 1 not found
-
- x = options.find(2);
- EXPECT_TRUE(x == options.end()); // option 2 not found
-
- x = options.find(32000);
- EXPECT_TRUE(x == options.end()); // option 32000 not found
-}
-
-}
diff --git a/src/lib/dhcp/tests/option4_addrlst_unittest.cc b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
new file mode 100644
index 0000000..f011140
--- /dev/null
+++ b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
@@ -0,0 +1,277 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#else
+#include <arpa/inet.h>
+#endif
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/option.h>
+#include <dhcp/option4_addrlst.h>
+#include <util/buffer.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+namespace {
+
+// sample data (a list of 4 addresses)
+const uint8_t sampledata[] = {
+ 192, 0, 2, 3, // 192.0.2.3
+ 255, 255, 255, 0, // 255.255.255.0 - popular netmask
+ 0, 0, 0, 0, // 0.0.0.0 - used for default routes or as the 'any' address
+ 127, 0, 0, 1 // loopback
+};
+
+// expected on-wire format for an option with 1 address
+const uint8_t expected1[] = { // 1 address
+ DHO_DOMAIN_NAME_SERVERS, 4, // type, length
+ 192, 0, 2, 3, // 192.0.2.3
+};
+
+// expected on-wire format for an option with 4 addresses
+const uint8_t expected4[] = { // 4 addresses
+ 254, 16, // type = 254, len = 16
+ 192, 0, 2, 3, // 192.0.2.3
+ 255, 255, 255, 0, // 255.255.255.0 - popular netmask
+ 0, 0, 0, 0, // 0.0.0.0 - used for default routes or as the 'any' address
+ 127, 0, 0, 1 // loopback
+};
+
+class Option4AddrLstTest : public ::testing::Test {
+protected:
+
+ Option4AddrLstTest():
+ vec_(vector<uint8_t>(300,0)) // 300 bytes long filled with 0s
+ {
+ sampleAddrs_.push_back(IOAddress("192.0.2.3"));
+ sampleAddrs_.push_back(IOAddress("255.255.255.0"));
+ sampleAddrs_.push_back(IOAddress("0.0.0.0"));
+ sampleAddrs_.push_back(IOAddress("127.0.0.1"));
+ }
+
+ vector<uint8_t> vec_;
+ Option4AddrLst::AddressContainer sampleAddrs_;
+
+};
+
+TEST_F(Option4AddrLstTest, parse1) {
+
+ memcpy(&vec_[0], sampledata, sizeof(sampledata));
+
+ // just one address
+ Option4AddrLst* opt1 = 0;
+ EXPECT_NO_THROW(
+ opt1 = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS,
+ vec_.begin(),
+ vec_.begin()+4);
+ // use just first address (4 bytes), not the whole
+ // sampledata
+ );
+
+ EXPECT_EQ(Option::V4, opt1->getUniverse());
+
+ EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt1->getType());
+ EXPECT_EQ(6, opt1->len()); // 2 (header) + 4 (1x IPv4 addr)
+
+ Option4AddrLst::AddressContainer addrs = opt1->getAddresses();
+ ASSERT_EQ(1, addrs.size());
+
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ opt1 = 0;
+ );
+
+ // 1 address
+}
+
+TEST_F(Option4AddrLstTest, parse4) {
+
+ vector<uint8_t> buffer(300,0); // 300 bytes long filled with 0s
+
+ memcpy(&buffer[0], sampledata, sizeof(sampledata));
+
+ // 4 addresses
+ Option4AddrLst* opt4 = 0;
+ EXPECT_NO_THROW(
+ opt4 = new Option4AddrLst(254,
+ buffer.begin(),
+ buffer.begin()+sizeof(sampledata));
+ );
+
+ EXPECT_EQ(Option::V4, opt4->getUniverse());
+
+ EXPECT_EQ(254, opt4->getType());
+ EXPECT_EQ(18, opt4->len()); // 2 (header) + 16 (4x IPv4 addrs)
+
+ Option4AddrLst::AddressContainer addrs = opt4->getAddresses();
+ ASSERT_EQ(4, addrs.size());
+
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+ EXPECT_EQ("255.255.255.0", addrs[1].toText());
+ EXPECT_EQ("0.0.0.0", addrs[2].toText());
+ EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+ EXPECT_NO_THROW(
+ delete opt4;
+ opt4 = 0;
+ );
+}
+
+TEST_F(Option4AddrLstTest, assembly1) {
+
+ Option4AddrLst* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("192.0.2.3"));
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
+ EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt->getType());
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(1, addrs.size() );
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ ASSERT_EQ(6, opt->len());
+ ASSERT_EQ(6, buf.getLength());
+
+ EXPECT_EQ(0, memcmp(expected1, buf.getData(), 6));
+
+ EXPECT_NO_THROW(
+ delete opt;
+ opt = 0;
+ );
+
+ // This is an old-fashioned IPv4 option. We don't serve IPv6 addresses here!
+ EXPECT_THROW(
+ opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("2001:db8::1")),
+ BadValue
+ );
+ if (opt) {
+ // Test failed: the exception was not thrown and the option was created instead.
+ delete opt;
+ }
+}
+
+TEST_F(Option4AddrLstTest, assembly4) {
+
+
+ Option4AddrLst* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(254, sampleAddrs_);
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
+ EXPECT_EQ(254, opt->getType());
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(4, addrs.size() );
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+ EXPECT_EQ("255.255.255.0", addrs[1].toText());
+ EXPECT_EQ("0.0.0.0", addrs[2].toText());
+ EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ ASSERT_EQ(18, opt->len()); // 2(header) + 4xsizeof(IPv4addr)
+ ASSERT_EQ(18, buf.getLength());
+
+ ASSERT_EQ(0, memcmp(expected4, buf.getData(), 18));
+
+ EXPECT_NO_THROW(
+ delete opt;
+ opt = 0;
+ );
+
+ // This is an old-fashioned IPv4 option. We don't serve IPv6 addresses here!
+ sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+ EXPECT_THROW(
+ opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, sampleAddrs_),
+ BadValue
+ );
+ if (opt) {
+ // Test failed: the exception was not thrown and the option was created instead.
+ delete opt;
+ }
+}
+
+TEST_F(Option4AddrLstTest, setAddress) {
+ Option4AddrLst* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(123, IOAddress("1.2.3.4"));
+ );
+ opt->setAddress(IOAddress("192.0.255.255"));
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(1, addrs.size() );
+ EXPECT_EQ("192.0.255.255", addrs[0].toText());
+
+ // Only IPv4 addresses are accepted; an IPv6 address should be rejected.
+ EXPECT_THROW(
+ opt->setAddress(IOAddress("2001:db8::1")),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(Option4AddrLstTest, setAddresses) {
+
+ Option4AddrLst* opt = 0;
+
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(123); // empty list
+ );
+
+ opt->setAddresses(sampleAddrs_);
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(4, addrs.size() );
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+ EXPECT_EQ("255.255.255.0", addrs[1].toText());
+ EXPECT_EQ("0.0.0.0", addrs[2].toText());
+ EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+ // Only IPv4 addresses are accepted; an IPv6 address should be rejected.
+ sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+ EXPECT_THROW(
+ opt->setAddresses(sampleAddrs_),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option6_addrlst_unittest.cc b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
index d54282a..9aabe13 100644
--- a/src/lib/dhcp/tests/option6_addrlst_unittest.cc
+++ b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -15,37 +15,42 @@
#include <config.h>
#include <iostream>
#include <sstream>
-
#ifdef _WIN32
#include <ws2tcpip.h>
#else
#include <arpa/inet.h>
#endif
#include <gtest/gtest.h>
-
-#include "asiolink/io_address.h"
-#include "dhcp/dhcp6.h"
-#include "dhcp/option.h"
-#include "dhcp/option6_addrlst.h"
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_addrlst.h>
+#include <util/buffer.h>
using namespace std;
using namespace isc;
using namespace isc::dhcp;
using namespace isc::asiolink;
+using namespace isc::util;
namespace {
class Option6AddrLstTest : public ::testing::Test {
public:
- Option6AddrLstTest() {
+ Option6AddrLstTest(): buf_(255), outBuf_(255) {
+ for (int i = 0; i < 255; i++) {
+ buf_[i] = 255 - i;
+ }
}
+ OptionBuffer buf_;
+ OutputBuffer outBuf_;
};
TEST_F(Option6AddrLstTest, basic) {
- // limiting tests to just a 2001:db8::/32 as is *wrong*.
+ // Limiting tests to just 2001:db8::/32 is *wrong*.
// Good tests check corner cases as well.
// ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff checks
- // for integer overflow
+ // for integer overflow.
// ff02::face:b00c checks if multicast addresses
// can be represented properly.
@@ -103,18 +108,16 @@ TEST_F(Option6AddrLstTest, basic) {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
- boost::shared_array<uint8_t> buf(new uint8_t[300]);
- for (int i = 0; i < 300; i++)
- buf[i] = 0;
-
- memcpy(&buf[0], sampledata, 48);
+ memcpy(&buf_[0], sampledata, 48);
// just a single address
Option6AddrLst* opt1 = 0;
EXPECT_NO_THROW(
- opt1 = new Option6AddrLst(D6O_NAME_SERVERS, buf, 128, 0, 16);
+ opt1 = new Option6AddrLst(D6O_NAME_SERVERS, buf_.begin(), buf_.begin() + 16 );
);
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
+
EXPECT_EQ(D6O_NAME_SERVERS, opt1->getType());
EXPECT_EQ(20, opt1->len());
Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
@@ -122,17 +125,16 @@ TEST_F(Option6AddrLstTest, basic) {
IOAddress addr = addrs[0];
EXPECT_EQ("2001:db8:1::dead:beef", addr.toText());
- // pack this option again in the same buffer, but in
- // different place
- int offset = opt1->pack(buf,300, 100);
+ // pack this option
+ opt1->pack(outBuf_);
- EXPECT_EQ(120, offset);
- EXPECT_EQ( 0, memcmp(expected1, &buf[100], 20) );
+ EXPECT_EQ(20, outBuf_.getLength());
+ EXPECT_EQ(0, memcmp(expected1, outBuf_.getData(), 20));
// two addresses
Option6AddrLst* opt2 = 0;
EXPECT_NO_THROW(
- opt2 = new Option6AddrLst(D6O_SIP_SERVERS_ADDR, buf, 128, 0, 32);
+ opt2 = new Option6AddrLst(D6O_SIP_SERVERS_ADDR, buf_.begin(), buf_.begin() + 32);
);
EXPECT_EQ(D6O_SIP_SERVERS_ADDR, opt2->getType());
EXPECT_EQ(36, opt2->len());
@@ -141,17 +143,17 @@ TEST_F(Option6AddrLstTest, basic) {
EXPECT_EQ("2001:db8:1::dead:beef", addrs[0].toText());
EXPECT_EQ("ff02::face:b00c", addrs[1].toText());
- // pack this option again in the same buffer, but in
- // different place
- offset = opt2->pack(buf,300, 150);
+ // pack this option
+ outBuf_.clear();
+ opt2->pack(outBuf_);
- EXPECT_EQ(150+36, offset);
- EXPECT_EQ( 0, memcmp(expected2, &buf[150], 36));
+ EXPECT_EQ(36, outBuf_.getLength() );
+ EXPECT_EQ(0, memcmp(expected2, outBuf_.getData(), 36));
// three addresses
Option6AddrLst* opt3 = 0;
EXPECT_NO_THROW(
- opt3 = new Option6AddrLst(D6O_NIS_SERVERS, buf, 128, 0, 48);
+ opt3 = new Option6AddrLst(D6O_NIS_SERVERS, buf_.begin(), buf_.begin() + 48);
);
EXPECT_EQ(D6O_NIS_SERVERS, opt3->getType());
@@ -162,12 +164,12 @@ TEST_F(Option6AddrLstTest, basic) {
EXPECT_EQ("ff02::face:b00c", addrs[1].toText());
EXPECT_EQ("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", addrs[2].toText());
- // pack this option again in the same buffer, but in
- // different place
- offset = opt3->pack(buf,300, 200);
+ // pack this option
+ outBuf_.clear();
+ opt3->pack(outBuf_);
- EXPECT_EQ(252, offset);
- EXPECT_EQ( 0, memcmp(expected3, &buf[200], 52) );
+ EXPECT_EQ(52, outBuf_.getLength());
+ EXPECT_EQ(0, memcmp(expected3, outBuf_.getData(), 52));
EXPECT_NO_THROW(
delete opt1;
@@ -182,6 +184,7 @@ TEST_F(Option6AddrLstTest, constructors) {
EXPECT_NO_THROW(
opt1 = new Option6AddrLst(1234, IOAddress("::1"));
);
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
EXPECT_EQ(1234, opt1->getType());
Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
diff --git a/src/lib/dhcp/tests/option6_ia_unittest.cc b/src/lib/dhcp/tests/option6_ia_unittest.cc
index b396575..6711317 100644
--- a/src/lib/dhcp/tests/option6_ia_unittest.cc
+++ b/src/lib/dhcp/tests/option6_ia_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -23,54 +23,53 @@
#endif
#include <gtest/gtest.h>
-#include <boost/shared_array.hpp>
-#include <boost/shared_ptr.hpp>
-
-#include "dhcp/dhcp6.h"
-#include "dhcp/option.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/option6_iaaddr.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+#include <util/buffer.h>
using namespace std;
using namespace isc;
using namespace isc::dhcp;
using namespace isc::asiolink;
+using namespace isc::util;
namespace {
class Option6IATest : public ::testing::Test {
public:
- Option6IATest() {
+ Option6IATest(): buf_(255), outBuf_(255) {
+ for (int i = 0; i < 255; i++) {
+ buf_[i] = 255 - i;
+ }
}
+ OptionBuffer buf_;
+ OutputBuffer outBuf_;
};
TEST_F(Option6IATest, basic) {
+ buf_[0] = 0xa1; // iaid
+ buf_[1] = 0xa2;
+ buf_[2] = 0xa3;
+ buf_[3] = 0xa4;
- boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
- for (int i = 0; i < 128; i++)
- simple_buf[i] = 0;
- simple_buf[0] = 0xa1; // iaid
- simple_buf[1] = 0xa2;
- simple_buf[2] = 0xa3;
- simple_buf[3] = 0xa4;
-
- simple_buf[4] = 0x81; // T1
- simple_buf[5] = 0x02;
- simple_buf[6] = 0x03;
- simple_buf[7] = 0x04;
+ buf_[4] = 0x81; // T1
+ buf_[5] = 0x02;
+ buf_[6] = 0x03;
+ buf_[7] = 0x04;
- simple_buf[8] = 0x84; // T2
- simple_buf[9] = 0x03;
- simple_buf[10] = 0x02;
- simple_buf[11] = 0x01;
+ buf_[8] = 0x84; // T2
+ buf_[9] = 0x03;
+ buf_[10] = 0x02;
+ buf_[11] = 0x01;
// create an option
// unpack() is called from constructor
Option6IA* opt = new Option6IA(D6O_IA_NA,
- simple_buf,
- 128,
- 0,
- 12);
+ buf_.begin(),
+ buf_.begin() + 12);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
EXPECT_EQ(D6O_IA_NA, opt->getType());
EXPECT_EQ(0xa1a2a3a4, opt->getIAID());
EXPECT_EQ(0x81020304, opt->getT1());
@@ -80,36 +79,31 @@ TEST_F(Option6IATest, basic) {
// different place
// test for pack()
- int offset = opt->pack(simple_buf, 128, 60);
+ opt->pack(outBuf_);
- // 4 bytes header + 4 bytes content
- EXPECT_EQ(12, opt->len() - 4);
+ // 4 bytes header + 12 bytes content
+ EXPECT_EQ(12, opt->len() - opt->getHeaderLen());
EXPECT_EQ(D6O_IA_NA, opt->getType());
- EXPECT_EQ(offset, 76); // 60 + lenght(IA_NA) = 76
+ EXPECT_EQ(16, outBuf_.getLength()); // length(IA_NA) = 16
// check if pack worked properly:
+ InputBuffer out(outBuf_.getData(), outBuf_.getLength());
+
// if option type is correct
- EXPECT_EQ(D6O_IA_NA, simple_buf[60]*256 + simple_buf[61]);
+ EXPECT_EQ(D6O_IA_NA, out.readUint16());
// if option length is correct
- EXPECT_EQ(12, simple_buf[62]*256 + simple_buf[63]);
+ EXPECT_EQ(12, out.readUint16());
// if iaid is correct
- unsigned int iaid = htonl(*(unsigned int*)&simple_buf[64]);
- EXPECT_EQ(0xa1a2a3a4, iaid );
+ EXPECT_EQ(0xa1a2a3a4, out.readUint32() );
// if T1 is correct
- EXPECT_EQ(0x81020304, (simple_buf[68] << 24) +
- (simple_buf[69] << 16) +
- (simple_buf[70] << 8) +
- (simple_buf[71]) );
+ EXPECT_EQ(0x81020304, out.readUint32() );
// if T2 is correct
- EXPECT_EQ(0x84030201, (simple_buf[72] << 24) +
- (simple_buf[73] << 16) +
- (simple_buf[74] << 8) +
- (simple_buf[75]) );
+ EXPECT_EQ(0x84030201, out.readUint32() );
EXPECT_NO_THROW(
delete opt;
@@ -117,14 +111,11 @@ TEST_F(Option6IATest, basic) {
}
TEST_F(Option6IATest, simple) {
- boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
- for (int i = 0; i < 128; i++)
- simple_buf[i] = 0;
-
Option6IA * ia = new Option6IA(D6O_IA_NA, 1234);
ia->setT1(2345);
ia->setT2(3456);
+ EXPECT_EQ(Option::V6, ia->getUniverse());
EXPECT_EQ(D6O_IA_NA, ia->getType());
EXPECT_EQ(1234, ia->getIAID());
EXPECT_EQ(2345, ia->getT1());
@@ -135,25 +126,21 @@ TEST_F(Option6IATest, simple) {
);
}
+
// test if option can build suboptions
TEST_F(Option6IATest, suboptions_pack) {
- boost::shared_array<uint8_t> buf(new uint8_t[128]);
- for (int i=0; i<128; i++)
- buf[i] = 0;
- buf[0] = 0xff;
- buf[1] = 0xfe;
- buf[2] = 0xfc;
+ buf_[0] = 0xff;
+ buf_[1] = 0xfe;
+ buf_[2] = 0xfc;
Option6IA * ia = new Option6IA(D6O_IA_NA, 0x13579ace);
ia->setT1(0x2345);
ia->setT2(0x3456);
- boost::shared_ptr<Option> sub1(new Option(Option::V6,
- 0xcafe));
+ OptionPtr sub1(new Option(Option::V6, 0xcafe));
boost::shared_ptr<Option6IAAddr> addr1(
- new Option6IAAddr(D6O_IAADDR, IOAddress("2001:db8:1234:5678::abcd"),
- 0x5000, 0x7000));
+ new Option6IAAddr(D6O_IAADDR, IOAddress("2001:db8:1234:5678::abcd"), 0x5000, 0x7000));
ia->addOption(sub1);
ia->addOption(addr1);
@@ -182,29 +169,29 @@ TEST_F(Option6IATest, suboptions_pack) {
0, 0 // len
};
- int offset = ia->pack(buf, 128, 10);
- ASSERT_EQ(offset, 10 + 48);
+ ia->pack(outBuf_);
+ ASSERT_EQ(48, outBuf_.getLength());
- EXPECT_EQ(0, memcmp(&buf[10], expected, 48));
+ EXPECT_EQ(0, memcmp(outBuf_.getData(), expected, 48));
EXPECT_NO_THROW(
delete ia;
);
}
+
// test if option can parse suboptions
TEST_F(Option6IATest, suboptions_unpack) {
-
-
+ // sizeof (expected) = 48 bytes
uint8_t expected[] = {
- D6O_IA_NA/256, D6O_IA_NA%256, // type
+ D6O_IA_NA / 256, D6O_IA_NA % 256, // type
0, 28, // length
0x13, 0x57, 0x9a, 0xce, // iaid
0, 0, 0x23, 0x45, // T1
0, 0, 0x34, 0x56, // T2
// iaaddr suboption
- D6O_IAADDR/256, D6O_IAADDR%256, // type
+ D6O_IAADDR / 256, D6O_IAADDR % 256, // type
0, 24, // len
0x20, 0x01, 0xd, 0xb8, 0x12,0x34, 0x56, 0x78,
0, 0, 0, 0, 0, 0, 0xab, 0xcd, // IP address
@@ -215,18 +202,13 @@ TEST_F(Option6IATest, suboptions_unpack) {
0xca, 0xfe, // type
0, 0 // len
};
+ ASSERT_EQ(48, sizeof(expected));
- boost::shared_array<uint8_t> buf(new uint8_t[128]);
- for (int i = 0; i < 128; i++)
- buf[i] = 0;
- memcpy(&buf[0], expected, 48);
+ memcpy(&buf_[0], expected, sizeof(expected));
Option6IA* ia = 0;
EXPECT_NO_THROW({
- ia = new Option6IA(D6O_IA_NA, buf, 128, 4, 44);
-
- // let's limit verbosity of this test
- // cout << "Parsed option:" << endl << ia->toText() << endl;
+ ia = new Option6IA(D6O_IA_NA, buf_.begin() + 4, buf_.begin() + sizeof(expected));
});
ASSERT_TRUE(ia);
@@ -235,8 +217,8 @@ TEST_F(Option6IATest, suboptions_unpack) {
EXPECT_EQ(0x2345, ia->getT1());
EXPECT_EQ(0x3456, ia->getT2());
- boost::shared_ptr<Option> subopt = ia->getOption(D6O_IAADDR);
- ASSERT_NE(boost::shared_ptr<Option>(), subopt); // non-NULL
+ OptionPtr subopt = ia->getOption(D6O_IAADDR);
+ ASSERT_NE(OptionPtr(), subopt); // non-NULL
// checks for address option
Option6IAAddr * addr = dynamic_cast<Option6IAAddr*>(subopt.get());
@@ -254,7 +236,8 @@ TEST_F(Option6IATest, suboptions_unpack) {
EXPECT_EQ(0xcafe, subopt->getType());
EXPECT_EQ(4, subopt->len());
- EXPECT_EQ(NULL, subopt->getData());
+ // there should be no data at all
+ EXPECT_EQ(0, subopt->getData().size());
subopt = ia->getOption(1); // get option 1
ASSERT_FALSE(subopt); // should be NULL
diff --git a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
index 0f355c2..129b807 100644
--- a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
+++ b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -23,61 +23,64 @@
#endif
#include <gtest/gtest.h>
-#include "dhcp/dhcp6.h"
-#include "dhcp/option.h"
-#include "dhcp/option6_iaaddr.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_iaaddr.h>
+#include <util/buffer.h>
using namespace std;
using namespace isc;
using namespace isc::dhcp;
+using namespace isc::util;
namespace {
class Option6IAAddrTest : public ::testing::Test {
public:
- Option6IAAddrTest() {
+ Option6IAAddrTest() : buf_(255), outBuf_(255) {
+ for (int i = 0; i < 255; i++) {
+ buf_[i] = 255 - i;
+ }
}
+ OptionBuffer buf_;
+ OutputBuffer outBuf_;
};
-/// TODO reenable this once ticket #1313 is implemented.
TEST_F(Option6IAAddrTest, basic) {
-
- boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
- for (int i = 0; i < 128; i++)
- simple_buf[i] = 0;
-
- simple_buf[0] = 0x20;
- simple_buf[1] = 0x01;
- simple_buf[2] = 0x0d;
- simple_buf[3] = 0xb8;
- simple_buf[4] = 0x00;
- simple_buf[5] = 0x01;
- simple_buf[12] = 0xde;
- simple_buf[13] = 0xad;
- simple_buf[14] = 0xbe;
- simple_buf[15] = 0xef; // 2001:db8:1::dead:beef
-
- simple_buf[16] = 0x00;
- simple_buf[17] = 0x00;
- simple_buf[18] = 0x03;
- simple_buf[19] = 0xe8; // 1000
-
- simple_buf[20] = 0xb2;
- simple_buf[21] = 0xd0;
- simple_buf[22] = 0x5e;
- simple_buf[23] = 0x00; // 3,000,000,000
+ for (int i = 0; i < 255; i++) {
+ buf_[i] = 0;
+ }
+ buf_[0] = 0x20;
+ buf_[1] = 0x01;
+ buf_[2] = 0x0d;
+ buf_[3] = 0xb8;
+ buf_[4] = 0x00;
+ buf_[5] = 0x01;
+ buf_[12] = 0xde;
+ buf_[13] = 0xad;
+ buf_[14] = 0xbe;
+ buf_[15] = 0xef; // 2001:db8:1::dead:beef
+
+ buf_[16] = 0x00;
+ buf_[17] = 0x00;
+ buf_[18] = 0x03;
+ buf_[19] = 0xe8; // 1000
+
+ buf_[20] = 0xb2;
+ buf_[21] = 0xd0;
+ buf_[22] = 0x5e;
+ buf_[23] = 0x00; // 3,000,000,000
// create an option (unpack content)
Option6IAAddr* opt = new Option6IAAddr(D6O_IAADDR,
- simple_buf,
- 128,
- 0,
- 24);
+ buf_.begin(),
+ buf_.begin() + 24);
- // pack this option again in the same buffer, but in
- // different place
- int offset = opt->pack(simple_buf, 128, 50);
+ // pack this option
+ opt->pack(outBuf_);
- EXPECT_EQ(78, offset);
+ EXPECT_EQ(28, outBuf_.getLength());
+
+ EXPECT_EQ(Option::V6, opt->getUniverse());
// 4 bytes header + 4 bytes content
EXPECT_EQ("2001:db8:1::dead:beef", opt->getAddress().toText());
@@ -90,18 +93,18 @@ TEST_F(Option6IAAddrTest, basic) {
opt->len());
// check if pack worked properly:
+ const uint8_t* out = (const uint8_t*)outBuf_.getData();
+
// if option type is correct
- EXPECT_EQ(D6O_IAADDR, simple_buf[50]*256 + simple_buf[51]);
+ EXPECT_EQ(D6O_IAADDR, out[0]*256 + out[1]);
// if option length is correct
- EXPECT_EQ(24, simple_buf[52]*256 + simple_buf[53]);
+ EXPECT_EQ(24, out[2]*256 + out[3]);
// if option content is correct
- EXPECT_EQ(0, memcmp(&simple_buf[0], &simple_buf[54],24));
+ EXPECT_EQ(0, memcmp(out + 4, &buf_[0], 24));
- EXPECT_NO_THROW(
- delete opt;
- );
+ EXPECT_NO_THROW( delete opt );
}
}
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index d4bd018..f5e2256 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,8 @@
#endif
#include <gtest/gtest.h>
#include <boost/shared_ptr.hpp>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
#include "dhcp/dhcp6.h"
#include "dhcp/option.h"
@@ -30,35 +32,189 @@
using namespace std;
using namespace isc;
using namespace isc::dhcp;
+using namespace isc::util;
namespace {
class OptionTest : public ::testing::Test {
public:
- OptionTest() {
+ OptionTest(): buf_(255), outBuf_(255) {
+ for (int i = 0; i < 255; i++) {
+ buf_[i] = 255 - i;
+ }
}
+ OptionBuffer buf_;
+ OutputBuffer outBuf_;
};
// v4 is not really implemented yet. A simple test will do for now
-TEST_F(OptionTest, basic4) {
+TEST_F(OptionTest, v4_basic) {
- Option* opt = new Option(Option::V4, 17);
+ Option* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option(Option::V4, 17);
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
EXPECT_EQ(17, opt->getType());
- EXPECT_EQ(NULL, opt->getData());
+ EXPECT_EQ(0, opt->getData().size());
EXPECT_EQ(2, opt->len()); // just v4 header
EXPECT_NO_THROW(
delete opt;
);
+ opt = 0;
+
+ // V4 options have type 0...255
+ EXPECT_THROW(
+ opt = new Option(Option::V4, 256),
+ BadValue
+ );
+
+ delete opt;
+ opt = 0;
+
+ // 0 is a special PAD option
+ EXPECT_THROW(
+ opt = new Option(Option::V4, 0),
+ BadValue
+ );
+
+ delete opt;
+ opt = 0;
+
+ // 255 is a special END option
+ EXPECT_THROW(
+ opt = new Option(Option::V4, 255),
+ BadValue
+ );
+
+ delete opt;
+ opt = 0;
+}
+
+const uint8_t dummyPayload[] =
+{ 1, 2, 3, 4};
+
+TEST_F(OptionTest, v4_data1) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ Option* opt = 0;
+
+ // create DHCPv4 option of type 123
+ // that contains 4 bytes of data
+ ASSERT_NO_THROW(
+ opt = new Option(Option::V4, 123, data);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), data.size());
+ EXPECT_TRUE(optData == data);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // this is how this option is supposed to look
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+ /// TODO: use vector<uint8_t> getData() when it is implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// this is almost the same test as v4_data1, but it uses
+// different constructor
+TEST_F(OptionTest, v4_data2) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ vector<uint8_t> expData = data;
+
+ // Add fake data at the front and at the end. The main purpose of this test is
+ // to check that only a subset of the whole vector can be used to create the option.
+ data.insert(data.begin(), 56);
+ data.push_back(67);
+
+ // Data contains extra garbage at the beginning and at the end. It should be
+ // ignored, as we pass iterators to the proper data. Only the subset (limited by
+ // the iterators) of the vector should be used.
+ // expData contains expected content (just valid data, without garbage).
+
+ Option* opt = 0;
+
+ // Create DHCPv4 option of type 123 that contains
+ // 4 bytes of data (sizeof(dummyPayload)).
+ ASSERT_NO_THROW(
+ opt = new Option(Option::V4, 123, data.begin() + 1, data.end() - 1);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), expData.size());
+ EXPECT_TRUE(optData == expData);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // this is how this option is supposed to look
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+ /// TODO: use vector<uint8_t> getData() when it is implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(OptionTest, v4_toText) {
+
+ vector<uint8_t> buf(3);
+ buf[0] = 0;
+ buf[1] = 0xf;
+ buf[2] = 0xff;
+
+ Option opt(Option::V4, 253, buf);
+
+ EXPECT_EQ("type=253, len=3: 00:0f:ff", opt.toText());
}
// tests simple constructor
-TEST_F(OptionTest, basic6) {
+TEST_F(OptionTest, v6_basic) {
Option* opt = new Option(Option::V6, 1);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
EXPECT_EQ(1, opt->getType());
- EXPECT_EQ(NULL, opt->getData());
+ EXPECT_EQ(0, opt->getData().size());
EXPECT_EQ(4, opt->len()); // just v6 header
EXPECT_NO_THROW(
@@ -68,30 +224,30 @@ TEST_F(OptionTest, basic6) {
// tests constructor used in pkt reception
// option contains actual data
-TEST_F(OptionTest, data1) {
- boost::shared_array<uint8_t> buf(new uint8_t[32]);
+TEST_F(OptionTest, v6_data1) {
for (int i = 0; i < 32; i++)
- buf[i] = 100+i;
- Option* opt = new Option(Option::V6, 333, //type
- buf,
- 3, // offset
- 7); // 7 bytes of data
+ buf_[i] = 100+i;
+ Option* opt = new Option(Option::V6, 333, //type
+ buf_.begin() + 3, // begin offset
+ buf_.begin() + 10); // end offset (7 bytes of data)
EXPECT_EQ(333, opt->getType());
- ASSERT_EQ(&buf[3], opt->getData());
+
ASSERT_EQ(11, opt->len());
- EXPECT_EQ(0, memcmp(&buf[3], opt->getData(), 7) );
+ ASSERT_EQ(7, opt->getData().size());
+ EXPECT_EQ(0, memcmp(&buf_[3], &opt->getData()[0], 7) );
- int offset = opt->pack(buf, 32, 20);
- EXPECT_EQ(31, offset);
+ opt->pack(outBuf_);
+ EXPECT_EQ(11, outBuf_.getLength());
- EXPECT_EQ(buf[20], 333/256); // type
- EXPECT_EQ(buf[21], 333%256);
+ const uint8_t* out = (const uint8_t*)outBuf_.getData();
+ EXPECT_EQ(out[0], 333/256); // type
+ EXPECT_EQ(out[1], 333%256);
- EXPECT_EQ(buf[22], 0); // len
- EXPECT_EQ(buf[23], 7);
+ EXPECT_EQ(out[2], 0); // len
+ EXPECT_EQ(out[3], 7);
// payload
- EXPECT_EQ(0, memcmp(&buf[3], &buf[24], 7) );
+ EXPECT_EQ(0, memcmp(&buf_[3], out+4, 7) );
EXPECT_NO_THROW(
delete opt;
@@ -100,42 +256,39 @@ TEST_F(OptionTest, data1) {
// another test that tests the same thing, just
// with different input parameters
-TEST_F(OptionTest, data2) {
+TEST_F(OptionTest, v6_data2) {
- boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
- for (int i = 0; i < 128; i++)
- simple_buf[i] = 0;
- simple_buf[0] = 0xa1;
- simple_buf[1] = 0xa2;
- simple_buf[2] = 0xa3;
- simple_buf[3] = 0xa4;
+ buf_[0] = 0xa1;
+ buf_[1] = 0xa2;
+ buf_[2] = 0xa3;
+ buf_[3] = 0xa4;
// create an option (unpack content)
Option* opt = new Option(Option::V6,
D6O_CLIENTID,
- simple_buf,
- 0,
- 4);
+ buf_.begin(),
+ buf_.begin() + 4);
- // pack this option again in the same buffer, but in
- // different place
- int offset18 = opt->pack(simple_buf, 128, 10);
+ // pack this option
+ opt->pack(outBuf_);
// 4 bytes header + 4 bytes content
EXPECT_EQ(8, opt->len());
EXPECT_EQ(D6O_CLIENTID, opt->getType());
- EXPECT_EQ(offset18, 18);
+ EXPECT_EQ(8, outBuf_.getLength());
// check if pack worked properly:
// if option type is correct
- EXPECT_EQ(D6O_CLIENTID, simple_buf[10]*256 + simple_buf[11]);
+ const uint8_t* out = (const uint8_t*)outBuf_.getData();
+
+ EXPECT_EQ(D6O_CLIENTID, out[0]*256 + out[1]);
// if option length is correct
- EXPECT_EQ(4, simple_buf[12]*256 + simple_buf[13]);
+ EXPECT_EQ(4, out[2]*256 + out[3]);
// if option content is correct
- EXPECT_EQ(0, memcmp(&simple_buf[0], &simple_buf[14],4));
+ EXPECT_EQ(0, memcmp(&buf_[0], out + 4, 4));
EXPECT_NO_THROW(
delete opt;
@@ -148,19 +301,16 @@ TEST_F(OptionTest, data2) {
// |
// +----opt3
//
-TEST_F(OptionTest, suboptions1) {
- boost::shared_array<uint8_t> buf(new uint8_t[128]);
+TEST_F(OptionTest, v6_suboptions1) {
for (int i=0; i<128; i++)
- buf[i] = 100+i;
+ buf_[i] = 100+i;
Option* opt1 = new Option(Option::V6, 65535, //type
- buf,
- 0, // offset
- 3); // 3 bytes of data
- boost::shared_ptr<Option> opt2(new Option(Option::V6, 13));
- boost::shared_ptr<Option> opt3(new Option(Option::V6, 7,
- buf,
- 3, // offset
- 5)); // 5 bytes of data
+ buf_.begin(), // 3 bytes of data
+ buf_.begin() + 3);
+ OptionPtr opt2(new Option(Option::V6, 13));
+ OptionPtr opt3(new Option(Option::V6, 7,
+ buf_.begin() + 3,
+ buf_.begin() + 8)); // 5 bytes of data
opt1->addOption(opt2);
opt1->addOption(opt3);
// opt2 len = 4 (just header)
@@ -177,36 +327,33 @@ TEST_F(OptionTest, suboptions1) {
0, 13, 0, 0 // no data at all
};
- int offset = opt1->pack(buf, 128, 20);
- EXPECT_EQ(40, offset);
+ opt1->pack(outBuf_);
+ EXPECT_EQ(20, outBuf_.getLength());
// payload
- EXPECT_EQ(0, memcmp(&buf[20], expected, 20) );
+ EXPECT_EQ(0, memcmp(outBuf_.getData(), expected, 20) );
EXPECT_NO_THROW(
delete opt1;
);
}
-// check that an option can contain 2 suboptions:
+// check that an option can contain nested suboptions:
// opt1
// +----opt2
// |
// +----opt3
//
-TEST_F(OptionTest, suboptions2) {
- boost::shared_array<uint8_t> buf(new uint8_t[128]);
+TEST_F(OptionTest, v6_suboptions2) {
for (int i=0; i<128; i++)
- buf[i] = 100+i;
+ buf_[i] = 100+i;
Option* opt1 = new Option(Option::V6, 65535, //type
- buf,
- 0, // offset
- 3); // 3 bytes of data
- boost::shared_ptr<Option> opt2(new Option(Option::V6, 13));
- boost::shared_ptr<Option> opt3(new Option(Option::V6, 7,
- buf,
- 3, // offset
- 5)); // 5 bytes of data
+ buf_.begin(),
+ buf_.begin() + 3);
+ OptionPtr opt2(new Option(Option::V6, 13));
+ OptionPtr opt3(new Option(Option::V6, 7,
+ buf_.begin() + 3,
+ buf_.begin() + 8));
opt1->addOption(opt2);
opt2->addOption(opt3);
// opt3 len = 9 4(header)+5(data)
@@ -219,25 +366,24 @@ TEST_F(OptionTest, suboptions2) {
0, 7, 0, 5, 103, 104, 105, 106, 107,
};
- int offset = opt1->pack(buf, 128, 20);
- EXPECT_EQ(40, offset);
+ opt1->pack(outBuf_);
+ EXPECT_EQ(20, outBuf_.getLength());
// payload
- EXPECT_EQ(0, memcmp(&buf[20], expected, 20) );
+ EXPECT_EQ(0, memcmp(outBuf_.getData(), expected, 20) );
EXPECT_NO_THROW(
delete opt1;
);
}
-TEST_F(OptionTest, addgetdel) {
- boost::shared_array<uint8_t> buf(new uint8_t[128]);
+TEST_F(OptionTest, v6_addgetdel) {
for (int i=0; i<128; i++)
- buf[i] = 100+i;
+ buf_[i] = 100+i;
Option* parent = new Option(Option::V6, 65535); //type
- boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
- boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
- boost::shared_ptr<Option> opt3(new Option(Option::V6, 2));
+ OptionPtr opt1(new Option(Option::V6, 1));
+ OptionPtr opt2(new Option(Option::V6, 2));
+ OptionPtr opt3(new Option(Option::V6, 2));
parent->addOption(opt1);
parent->addOption(opt2);
@@ -247,7 +393,7 @@ TEST_F(OptionTest, addgetdel) {
EXPECT_EQ(opt2, parent->getOption(2));
// expect NULL
- EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(4));
+ EXPECT_EQ(OptionPtr(), parent->getOption(4));
// now there are 2 options of type 2
parent->addOption(opt3);
@@ -256,28 +402,128 @@ TEST_F(OptionTest, addgetdel) {
EXPECT_EQ(true, parent->delOption(2));
// there still should be the other option 2
- EXPECT_NE(boost::shared_ptr<Option>(), parent->getOption(2));
+ EXPECT_NE(OptionPtr(), parent->getOption(2));
// let's delete the other option 2
EXPECT_EQ(true, parent->delOption(2));
// no more options with type=2
- EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(2));
+ EXPECT_EQ(OptionPtr(), parent->getOption(2));
// let's try to delete - should fail
EXPECT_TRUE(false == parent->delOption(2));
-}
+ delete parent;
}
-TEST_F(OptionTest, toText) {
- boost::shared_array<uint8_t> buf(new uint8_t[3]);
- buf[0] = 0;
- buf[1] = 0xf;
- buf[2] = 0xff;
+TEST_F(OptionTest, v6_toText) {
+ buf_[0] = 0;
+ buf_[1] = 0xf;
+ buf_[2] = 0xff;
- boost::shared_ptr<Option> opt(new Option(Option::V6, 258,
- buf, 0, 3));
+ OptionPtr opt(new Option(Option::V6, 258, buf_.begin(), buf_.begin() + 3 ));
EXPECT_EQ("type=258, len=3: 00:0f:ff", opt->toText());
}
+
+
+TEST_F(OptionTest, getUintX) {
+
+ buf_[0] = 0x5;
+ buf_[1] = 0x4;
+ buf_[2] = 0x3;
+ buf_[3] = 0x2;
+ buf_[4] = 0x1;
+
+ // five options with varying lengths
+ OptionPtr opt1(new Option(Option::V6, 258, buf_.begin(), buf_.begin() + 1));
+ OptionPtr opt2(new Option(Option::V6, 258, buf_.begin(), buf_.begin() + 2));
+ OptionPtr opt3(new Option(Option::V6, 258, buf_.begin(), buf_.begin() + 3));
+ OptionPtr opt4(new Option(Option::V6, 258, buf_.begin(), buf_.begin() + 4));
+ OptionPtr opt5(new Option(Option::V6, 258, buf_.begin(), buf_.begin() + 5));
+
+ EXPECT_EQ(5, opt1->getUint8());
+ EXPECT_THROW(opt1->getUint16(), OutOfRange);
+ EXPECT_THROW(opt1->getUint32(), OutOfRange);
+
+ EXPECT_EQ(5, opt2->getUint8());
+ EXPECT_EQ(0x0504, opt2->getUint16());
+ EXPECT_THROW(opt2->getUint32(), OutOfRange);
+
+ EXPECT_EQ(5, opt3->getUint8());
+ EXPECT_EQ(0x0504, opt3->getUint16());
+ EXPECT_THROW(opt3->getUint32(), OutOfRange);
+
+ EXPECT_EQ(5, opt4->getUint8());
+ EXPECT_EQ(0x0504, opt4->getUint16());
+ EXPECT_EQ(0x05040302, opt4->getUint32());
+
+ // the same as for the 4-byte option, just get the first 1, 2 or 4 bytes
+ EXPECT_EQ(5, opt5->getUint8());
+ EXPECT_EQ(0x0504, opt5->getUint16());
+ EXPECT_EQ(0x05040302, opt5->getUint32());
+
+}
+
+TEST_F(OptionTest, setUintX) {
+ OptionPtr opt1(new Option(Option::V4, 125));
+ OptionPtr opt2(new Option(Option::V4, 125));
+ OptionPtr opt4(new Option(Option::V4, 125));
+
+ // verify setUint8
+ opt1->setUint8(255);
+ EXPECT_EQ(255, opt1->getUint8());
+ opt1->pack4(outBuf_);
+ EXPECT_EQ(3, opt1->len());
+ EXPECT_EQ(3, outBuf_.getLength());
+ uint8_t exp1[] = {125, 1, 255};
+ EXPECT_TRUE(0 == memcmp(exp1, outBuf_.getData(), 3));
+
+ // verify setUint16
+ outBuf_.clear();
+ opt2->setUint16(12345);
+ opt2->pack4(outBuf_);
+ EXPECT_EQ(12345, opt2->getUint16());
+ EXPECT_EQ(4, opt2->len());
+ EXPECT_EQ(4, outBuf_.getLength());
+ uint8_t exp2[] = {125, 2, 12345/256, 12345%256};
+ EXPECT_TRUE(0 == memcmp(exp2, outBuf_.getData(), 4));
+
+ // verify setUint32
+ outBuf_.clear();
+ opt4->setUint32(0x12345678);
+ opt4->pack4(outBuf_);
+ EXPECT_EQ(0x12345678, opt4->getUint32());
+ EXPECT_EQ(6, opt4->len());
+ EXPECT_EQ(6, outBuf_.getLength());
+ uint8_t exp4[] = {125, 4, 0x12, 0x34, 0x56, 0x78};
+ EXPECT_TRUE(0 == memcmp(exp4, outBuf_.getData(), 6));
+}
+
+TEST_F(OptionTest, setData) {
+ // verify data override with new buffer larger than
+ // initial option buffer size
+ OptionPtr opt1(new Option(Option::V4, 125,
+ buf_.begin(), buf_.begin() + 10));
+ buf_.resize(20, 1);
+ opt1->setData(buf_.begin(), buf_.end());
+ opt1->pack4(outBuf_);
+ ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size());
+ const uint8_t* test_data = static_cast<const uint8_t*>(outBuf_.getData());
+ EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(),
+ buf_.size()));
+
+ // verify data override with new buffer shorter than
+ // initial option buffer size
+ OptionPtr opt2(new Option(Option::V4, 125,
+ buf_.begin(), buf_.begin() + 10));
+ outBuf_.clear();
+ buf_.resize(5, 1);
+ opt2->setData(buf_.begin(), buf_.end());
+ opt2->pack4(outBuf_);
+ ASSERT_EQ(outBuf_.getLength() - opt2->getHeaderLen(), buf_.size());
+ test_data = static_cast<const uint8_t*>(outBuf_.getData());
+ EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt2->getHeaderLen(),
+ buf_.size()));
+}
+}
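
For reference, the values expected by the getUintX assertions above (0x0504, 0x05040302) come from reading the first data bytes in network byte order. A standalone sketch of that arithmetic follows (standard C++ only; readUint16()/readUint32() are illustrative helpers, not the Option API):

    // Standalone sketch of the big-endian reads behind the getUintX expectations:
    // {0x05, 0x04, 0x03, 0x02, 0x01} yields 0x05, 0x0504 and 0x05040302.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    static uint16_t readUint16(const std::vector<uint8_t>& b) {
        return static_cast<uint16_t>((b[0] << 8) | b[1]);
    }

    static uint32_t readUint32(const std::vector<uint8_t>& b) {
        return (static_cast<uint32_t>(b[0]) << 24) |
               (static_cast<uint32_t>(b[1]) << 16) |
               (static_cast<uint32_t>(b[2]) << 8)  |
                static_cast<uint32_t>(b[3]);
    }

    int main() {
        const std::vector<uint8_t> buf = {0x05, 0x04, 0x03, 0x02, 0x01};
        assert(buf[0] == 0x05);                 // corresponds to getUint8()
        assert(readUint16(buf) == 0x0504);      // corresponds to getUint16()
        assert(readUint32(buf) == 0x05040302);  // corresponds to getUint32()
        return 0;
    }
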
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index e7c4286..18208bd 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -24,54 +24,53 @@
#include <boost/static_assert.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
-
-#include "asiolink/io_address.h"
-#include "dhcp/pkt4.h"
-#include "dhcp/dhcp4.h"
-#include "exceptions/exceptions.h"
+#include <util/buffer.h>
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
using namespace isc::asiolink;
using namespace isc::dhcp;
-using namespace boost;
-
-// can't compare const to value directly, as it gives strange
-// linker errors in gtest.h
-
-static size_t DHCPV4_PKT_HDR_LEN = Pkt4::DHCPV4_PKT_HDR_LEN;
+using namespace isc::util;
+// don't import the entire boost namespace. It will unexpectedly hide uint8_t
+// for some systems.
+using boost::scoped_ptr;
namespace {
TEST(Pkt4Test, constructor) {
- ASSERT_EQ(236U, DHCPV4_PKT_HDR_LEN);
- Pkt4 * pkt = 0;
+ ASSERT_EQ(236U, static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) );
+ Pkt4* pkt = 0;
- // minimal
+ // Just some dummy payload.
uint8_t testData[250];
- for (int i=0 ; i < 250; i++)
- testData[i]=i;
+ for (int i = 0; i < 250; i++) {
+ testData[i]=i;
+ }
- // positive case1. Normal received packet
+ // Positive case1. Normal received packet.
EXPECT_NO_THROW(
- pkt = new Pkt4(testData, 236);
+ pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN);
);
- EXPECT_EQ(236, pkt->len());
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
EXPECT_NO_THROW(
delete pkt;
pkt = 0;
);
- // positive case2. Normal outgoing packet
+ // Positive case2. Normal outgoing packet.
EXPECT_NO_THROW(
pkt = new Pkt4(DHCPDISCOVER, 0xffffffff);
);
// DHCPv4 packet must be at least 236 bytes long
- EXPECT_EQ(DHCPV4_PKT_HDR_LEN, pkt->len());
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
EXPECT_EQ(DHCPDISCOVER, pkt->getType());
EXPECT_EQ(0xffffffff, pkt->getTransid());
EXPECT_NO_THROW(
@@ -79,32 +78,45 @@ TEST(Pkt4Test, constructor) {
pkt = 0;
);
- // negative case. Should drop truncated messages
+ // Negative case. Should drop truncated messages.
EXPECT_THROW(
- pkt = new Pkt4(testData, 235),
+ pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN-1),
OutOfRange
);
if (pkt) {
- // test failed anyway. Exception should have been thrown
+ // Test failed. Exception should have been thrown, but
+ // object was created instead. Let's clean this up.
delete pkt;
+ pkt = 0;
}
}
-// a sample transaction-id
-const static uint32_t dummyTransid = 0x12345678;
+// sample packet data
+const uint8_t dummyOp = BOOTREQUEST;
+const uint8_t dummyHtype = 6;
+const uint8_t dummyHlen = 6;
+const uint8_t dummyHops = 13;
+const uint32_t dummyTransid = 0x12345678;
+const uint16_t dummySecs = 42;
+const uint16_t dummyFlags = BOOTP_BROADCAST;
+
+const IOAddress dummyCiaddr("192.0.2.1");
+const IOAddress dummyYiaddr("1.2.3.4");
+const IOAddress dummySiaddr("192.0.2.255");
+const IOAddress dummyGiaddr("255.255.255.255");
// a dummy MAC address
const uint8_t dummyMacAddr[] = {0, 1, 2, 3, 4, 5};
// a dummy MAC address, padded with 0s
-const uint8_t dummyChaddr[16] = {0, 1, 2, 3, 4, 5, 0, 0,
+const uint8_t dummyChaddr[16] = {0, 1, 2, 3, 4, 5, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 };
// let's use some creative test content here (128 chars + \0)
const uint8_t dummyFile[] = "Lorem ipsum dolor sit amet, consectetur "
"adipiscing elit. Proin mollis placerat metus, at "
"lacinia orci ornare vitae. Mauris amet.";
-
+
// yet another type of test content (64 chars + \0)
const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur "
"adipiscing elit posuere.";
@@ -112,7 +124,7 @@ const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur "
BOOST_STATIC_ASSERT(sizeof(dummyFile) == Pkt4::MAX_FILE_LEN + 1);
BOOST_STATIC_ASSERT(sizeof(dummySname) == Pkt4::MAX_SNAME_LEN + 1);
-/// Generates test packet
+/// @brief Generates test packet.
///
/// Allocates and generates test packet, with all fixed
/// fields set to non-zero values. Content is not always
@@ -126,63 +138,65 @@ boost::shared_ptr<Pkt4>
generateTestPacket1() {
boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPDISCOVER, dummyTransid));
+
+ vector<uint8_t> vectorMacAddr(dummyMacAddr, dummyMacAddr
+ +sizeof(dummyMacAddr));
+
// hwType = 6(ETHERNET), hlen = 6(MAC address len)
- pkt->setHWAddr(6, 6, dummyMacAddr);
- pkt->setHops(13); // 13 relays. Wow!
- // transaction-id is already set
- pkt->setSecs(42);
- pkt->setFlags( 0xffffU ); // all flags set
- pkt->setCiaddr(IOAddress("192.0.2.1"));
- pkt->setYiaddr(IOAddress("1.2.3.4"));
- pkt->setSiaddr(IOAddress("192.0.2.255"));
- pkt->setGiaddr(IOAddress("255.255.255.255"));
- // chaddr already set with setHWAddr()
- pkt->setSname(dummySname, 64);
+ pkt->setHWAddr(dummyHtype, dummyHlen, vectorMacAddr);
+ pkt->setHops(dummyHops); // 13 relays. Wow!
+ // Transaction-id is already set.
+ pkt->setSecs(dummySecs);
+ pkt->setFlags(dummyFlags); // all flags set
+ pkt->setCiaddr(dummyCiaddr);
+ pkt->setYiaddr(dummyYiaddr);
+ pkt->setSiaddr(dummySiaddr);
+ pkt->setGiaddr(dummyGiaddr);
+ // Chaddr already set with setHWAddr().
+ pkt->setSname(dummySname, 64);
pkt->setFile(dummyFile, 128);
return (pkt);
}
-/// Generates test packet
+/// @brief Generates test packet.
///
/// Allocates and generates on-wire buffer that represents
-/// test packet, with all fixed fields set to non-zero values.
+/// test packet, with all fixed fields set to non-zero values.
/// Content is not always reasonable.
///
/// See generateTestPacket1() function that returns
/// exactly the same packet as Pkt4 object.
///
/// @return pointer to allocated Pkt4 object
-shared_array<uint8_t>
+// Returns a vector containing a DHCPv4 packet header.
+vector<uint8_t>
generateTestPacket2() {
- shared_array<uint8_t> buf(new uint8_t[Pkt4::DHCPV4_PKT_HDR_LEN]);
-
- // that is only part of the header. It contains all "short" fields,
- // larger fields are constructed separately
+ // That is only part of the header. It contains all "short" fields,
+ // larger fields are constructed separately.
uint8_t hdr[] = {
- 1, 6, 6, 13, // op, htype, hlen, hops,
+ 1, 6, 6, 13, // op, htype, hlen, hops,
0x12, 0x34, 0x56, 0x78, // transaction-id
- 0, 42, 0xff, 0xff, // 42 secs, 0xffff flags
- 192, 0, 2, 1, // ciaddr
- 1, 2, 3, 4, // yiaddr
- 192, 0, 2, 255, // siaddr
- 255, 255, 255, 255, // giaddr
+ 0, 42, 0x80, 0x00, // 42 secs, BROADCAST flags
+ 192, 0, 2, 1, // ciaddr
+ 1, 2, 3, 4, // yiaddr
+ 192, 0, 2, 255, // siaddr
+ 255, 255, 255, 255, // giaddr
};
- BOOST_STATIC_ASSERT(28 == sizeof(hdr));
+ // Initialize the vector with the header fields defined above.
+ vector<uint8_t> buf(hdr, hdr + sizeof(hdr));
- size_t offset = 0;
+ // Append the large header fields.
+ copy(dummyChaddr, dummyChaddr + Pkt4::MAX_CHADDR_LEN, back_inserter(buf));
+ copy(dummySname, dummySname + Pkt4::MAX_SNAME_LEN, back_inserter(buf));
+ copy(dummyFile, dummyFile + Pkt4::MAX_FILE_LEN, back_inserter(buf));
- memcpy(&buf[0] + offset, hdr, sizeof(hdr));
- offset += sizeof(hdr);
-
- memcpy(&buf[0] + offset, dummyMacAddr, 6); // chaddr is 16 bytes
- offset += Pkt4::MAX_CHADDR_LEN;
- memcpy(&buf[0] + offset, dummySname, 64); // sname is 64 bytes
- offset += Pkt4::MAX_SNAME_LEN;
- memcpy(&buf[0] + offset, dummyFile, 128);
- offset += Pkt4::MAX_FILE_LEN;
+ // Should now have all the header, so check. The "static_cast" is used
+ // to get round an odd bug whereby the linker appears not to find the
+ // definition of DHCPV4_PKT_HDR_LEN if it appears within an EXPECT_EQ().
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), buf.size());
return (buf);
}
@@ -192,112 +206,133 @@ TEST(Pkt4Test, fixedFields) {
boost::shared_ptr<Pkt4> pkt = generateTestPacket1();
// ok, let's check packet values
- EXPECT_EQ(1, pkt->getOp());
- EXPECT_EQ(6, pkt->getHtype());
- EXPECT_EQ(6, pkt->getHlen());
- EXPECT_EQ(13, pkt->getHops());
+ EXPECT_EQ(dummyOp, pkt->getOp());
+ EXPECT_EQ(dummyHtype, pkt->getHtype());
+ EXPECT_EQ(dummyHlen, pkt->getHlen());
+ EXPECT_EQ(dummyHops, pkt->getHops());
EXPECT_EQ(dummyTransid, pkt->getTransid());
- EXPECT_EQ(42, pkt->getSecs());
- EXPECT_EQ(0xffff, pkt->getFlags());
+ EXPECT_EQ(dummySecs, pkt->getSecs());
+ EXPECT_EQ(dummyFlags, pkt->getFlags());
- EXPECT_EQ(string("192.0.2.1"), pkt->getCiaddr().toText());
- EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr().toText());
- EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr().toText());
- EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr().toText());
+ EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+ EXPECT_EQ(dummyYiaddr.toText(), pkt->getYiaddr().toText());
+ EXPECT_EQ(dummySiaddr.toText(), pkt->getSiaddr().toText());
+ EXPECT_EQ(dummyGiaddr.toText(), pkt->getGiaddr().toText());
// chaddr is always 16 bytes long and contains link-layer addr (MAC)
- EXPECT_FALSE( memcmp(dummyChaddr, pkt->getChaddr(), 16) );
+ EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), 16));
- EXPECT_FALSE( memcmp(dummySname, pkt->getSname(), 64) );
+ EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], 64));
- EXPECT_FALSE( memcmp(dummyFile, pkt->getFile(), 128) );
+ EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], 128));
EXPECT_EQ(DHCPDISCOVER, pkt->getType());
}
-#if 0
-/// TODO Uncomment when ticket #1227 is implemented
TEST(Pkt4Test, fixedFieldsPack) {
boost::shared_ptr<Pkt4> pkt = generateTestPacket1();
- shared_array<uint8_t> expectedFormat = generateTestPacket2();
+ vector<uint8_t> expectedFormat = generateTestPacket2();
EXPECT_NO_THROW(
pkt->pack();
);
- ASSERT_EQ(Pkt4::DHCPV4_PKT_HDR_LEN, pkt->len());
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+
+ // redundant, but much easier to debug in gdb
+ const uint8_t* exp = &expectedFormat[0];
+ const uint8_t* got = static_cast<const uint8_t*>(pkt->getBuffer().getData());
- EXPECT_EQ(0, memcmp(&expectedFormat[0], pkt->getData(), pkt->len()));
+ EXPECT_EQ(0, memcmp(exp, got, Pkt4::DHCPV4_PKT_HDR_LEN));
}
/// TODO Uncomment when ticket #1226 is implemented
TEST(Pkt4Test, fixedFieldsUnpack) {
- shared_array<uint8_t> expectedFormat = generateTestPkt2();
+ vector<uint8_t> expectedFormat = generateTestPacket2();
- boost::shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
- Pkt4::DHCPV4_PKT_HDR_LEN));
+ expectedFormat.push_back(0x63); // magic cookie
+ expectedFormat.push_back(0x82);
+ expectedFormat.push_back(0x53);
+ expectedFormat.push_back(0x63);
+
+ expectedFormat.push_back(0x35); // message-type
+ expectedFormat.push_back(0x1);
+ expectedFormat.push_back(0x1);
+
+ boost::shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+ expectedFormat.size()));
+
+
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
// ok, let's check packet values
- EXPECT_EQ(1, pkt->getOp());
- EXPECT_EQ(6, pkt->getHtype());
- EXPECT_EQ(6, pkt->getHlen());
- EXPECT_EQ(13, pkt->getHops());
- EXPECT_EQ(transid, pkt->getTransid());
- EXPECT_EQ(42, pkt->getSecs());
- EXPECT_EQ(0xffff, pkt->getFlags());
-
- EXPECT_EQ(string("192.0.2.1"), pkt->getCiaddr.toText());
- EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr.toText());
- EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr.toText());
- EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr.toText());
+ EXPECT_EQ(dummyOp, pkt->getOp());
+ EXPECT_EQ(dummyHtype, pkt->getHtype());
+ EXPECT_EQ(dummyHlen, pkt->getHlen());
+ EXPECT_EQ(dummyHops, pkt->getHops());
+ EXPECT_EQ(dummyTransid, pkt->getTransid());
+ EXPECT_EQ(dummySecs, pkt->getSecs());
+ EXPECT_EQ(dummyFlags, pkt->getFlags());
+
+ EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+ EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr().toText());
+ EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr().toText());
+ EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr().toText());
// chaddr is always 16 bytes long and contains link-layer addr (MAC)
- EXPECT_FALSE( memcmp(expectedChaddr, pkt->getChaddr(), 16) );
+ EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), Pkt4::MAX_CHADDR_LEN));
- EXPECT_FALSE( memcmp(expectedSname, pkt->getSname(), 64) );
+ ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_SNAME_LEN), pkt->getSname().size());
+ EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
- EXPECT_FALSE( memcmp(expectedFile, pkt->getFile(), 128) );
+ ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_FILE_LEN), pkt->getFile().size());
+ EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
- EXPECT_EQ(DHCPSOLICIT, pkt->getType());
+ EXPECT_EQ(DHCPDISCOVER, pkt->getType());
}
-#endif
// this test is for hardware addresses (htype, hlen and chaddr fields)
TEST(Pkt4Test, hwAddr) {
- uint8_t mac[Pkt4::MAX_CHADDR_LEN];
+ vector<uint8_t> mac;
uint8_t expectedChaddr[Pkt4::MAX_CHADDR_LEN];
+ // We resize the vector to the specified length. For a fixed-length field this
+ // is more natural than clearing it (shrinking the size to 0) and pushing back
+ // each element (growing the length back to MAX_CHADDR_LEN).
+ mac.resize(Pkt4::MAX_CHADDR_LEN);
+
Pkt4* pkt = 0;
// let's test each hlen, from 0 till 16
- for (int macLen=0; macLen < Pkt4::MAX_CHADDR_LEN; macLen++) {
- for (int i=0; i < Pkt4::MAX_CHADDR_LEN; i++) {
+ for (int macLen = 0; macLen < Pkt4::MAX_CHADDR_LEN; macLen++) {
+ for (int i = 0; i < Pkt4::MAX_CHADDR_LEN; i++) {
mac[i] = 0;
expectedChaddr[i] = 0;
}
- for (int i=0; i < macLen; i++) {
- mac[i] = 128+i;
- expectedChaddr[i] = 128+i;
+ for (int i = 0; i < macLen; i++) {
+ mac[i] = 128 + i;
+ expectedChaddr[i] = 128 + i;
}
-
+
// type and transaction doesn't matter in this test
pkt = new Pkt4(DHCPOFFER, 1234);
pkt->setHWAddr(255-macLen*10, // just weird htype
macLen,
mac);
- EXPECT_EQ(0, memcmp(expectedChaddr, pkt->getChaddr(),
+ EXPECT_EQ(0, memcmp(expectedChaddr, pkt->getChaddr(),
Pkt4::MAX_CHADDR_LEN));
-#if 0
- /// TODO Uncomment when ticket #1227 is implemented)
- EXPECT_NO_THROW(
+ EXPECT_NO_THROW(
pkt->pack();
);
// CHADDR starts at offset 28 in DHCP packet
- EXPECT_EQ(0, memcmp(pkt->getData()+28, expectedChaddr,
- Pkt4::MAX_CHADDR_LEN));
-#endif
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+28;
+
+ EXPECT_EQ(0, memcmp(ptr, expectedChaddr, Pkt4::MAX_CHADDR_LEN));
delete pkt;
}
@@ -329,7 +364,7 @@ TEST(Pkt4Test, msgTypes) {
};
Pkt4* pkt = 0;
- for (int i=0; i < sizeof(types)/sizeof(msgType); i++) {
+ for (int i = 0; i < sizeof(types) / sizeof(msgType); i++) {
pkt = new Pkt4(types[i].dhcp, 0);
EXPECT_EQ(types[i].dhcp, pkt->getType());
@@ -353,35 +388,31 @@ TEST(Pkt4Test, msgTypes) {
TEST(Pkt4Test, sname) {
uint8_t sname[Pkt4::MAX_SNAME_LEN];
- uint8_t expectedSname[Pkt4::MAX_SNAME_LEN];
Pkt4* pkt = 0;
// let's test each sname length, from 0 till 64
for (int snameLen=0; snameLen < Pkt4::MAX_SNAME_LEN; snameLen++) {
- for (int i=0; i < Pkt4::MAX_SNAME_LEN; i++) {
+ for (int i = 0; i < Pkt4::MAX_SNAME_LEN; i++) {
sname[i] = 0;
- expectedSname[i] = 0;
}
- for (int i=0; i < snameLen; i++) {
+ for (int i = 0; i < snameLen; i++) {
sname[i] = i;
- expectedSname[i] = i;
}
-
+
// type and transaction doesn't matter in this test
pkt = new Pkt4(DHCPOFFER, 1234);
pkt->setSname(sname, snameLen);
- EXPECT_EQ(0, memcmp(expectedSname, pkt->getSname(), Pkt4::MAX_SNAME_LEN));
+ EXPECT_EQ(0, memcmp(sname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
-#if 0
- /// TODO Uncomment when ticket #1227 is implemented)
- EXPECT_NO_THROW(
+ EXPECT_NO_THROW(
pkt->pack();
);
// SNAME starts at offset 44 in DHCP packet
- EXPECT_EQ(0, memcmp(pkt->getData()+44, expectedChaddr, Pkt4::MAX_SNAME_LEN));
-#endif
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+44;
+ EXPECT_EQ(0, memcmp(ptr, sname, Pkt4::MAX_SNAME_LEN));
delete pkt;
}
@@ -390,39 +421,215 @@ TEST(Pkt4Test, sname) {
TEST(Pkt4Test, file) {
uint8_t file[Pkt4::MAX_FILE_LEN];
- uint8_t expectedFile[Pkt4::MAX_FILE_LEN];
Pkt4* pkt = 0;
- // let's test each file length, from 0 till 64
- for (int fileLen=0; fileLen < Pkt4::MAX_FILE_LEN; fileLen++) {
- for (int i=0; i < Pkt4::MAX_FILE_LEN; i++) {
+ // Let's test each file length, from 0 till 128.
+ for (int fileLen = 0; fileLen < Pkt4::MAX_FILE_LEN; fileLen++) {
+ for (int i = 0; i < Pkt4::MAX_FILE_LEN; i++) {
file[i] = 0;
- expectedFile[i] = 0;
}
- for (int i=0; i < fileLen; i++) {
+ for (int i = 0; i < fileLen; i++) {
file[i] = i;
- expectedFile[i] = i;
}
-
- // type and transaction doesn't matter in this test
+
+ // Type and transaction don't matter in this test.
pkt = new Pkt4(DHCPOFFER, 1234);
pkt->setFile(file, fileLen);
- EXPECT_EQ(0, memcmp(expectedFile, pkt->getFile(), Pkt4::MAX_FILE_LEN));
+ EXPECT_EQ(0, memcmp(file, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
-#if 0
- /// TODO Uncomment when ticket #1227 is implemented)
- EXPECT_NO_THROW(
+ //
+ EXPECT_NO_THROW(
pkt->pack();
);
- // FILE starts at offset 44 in DHCP packet
- EXPECT_EQ(0, memcmp(pkt->getData()+44, expectedChaddr, Pkt4::MAX_FILE_LEN));
-#endif
+ // FILE starts at offset 108 in DHCP packet.
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+108;
+ EXPECT_EQ(0, memcmp(ptr, file, Pkt4::MAX_FILE_LEN));
delete pkt;
}
}
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 53, 1, 1, // DHCP_MESSAGE_TYPE (required to not throw exception during unpack)
+ 128, 3, 30, 31, 32,
+ 254, 3, 40, 41, 42,
+};
+
+TEST(Pkt4Test, options) {
+ Pkt4* pkt = new Pkt4(DHCPOFFER, 0);
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].push_back(i*10);
+ payload[i].push_back(i*10+1);
+ payload[i].push_back(i*10+2);
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+ boost::shared_ptr<Option> optMsgType(new Option(Option::V4, DHO_DHCP_MESSAGE_TYPE));
+ boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[3]));
+ boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[4]));
+ optMsgType->setUint8(static_cast<uint8_t>(DHCPDISCOVER));
+
+ pkt->addOption(opt1);
+ pkt->addOption(opt2);
+ pkt->addOption(opt3);
+ pkt->addOption(opt4);
+ pkt->addOption(opt5);
+ pkt->addOption(optMsgType);
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+ EXPECT_FALSE(pkt->getOption(127)); // no such option
+
+ // options are unique in DHCPv4. It should not be possible
+ // to add more than one option of the same type.
+ EXPECT_THROW(
+ pkt->addOption(opt1),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ const OutputBuffer& buf = pkt->getBuffer();
+ // check that all options are stored, they should take sizeof(v4Opts),
+ // DHCP magic cookie (4 bytes), and OPTION_END added (just one byte)
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(DHCP_OPTIONS_COOKIE)
+ + sizeof(v4Opts) + 1, buf.getLength());
+
+ // check that this extra data actually contains our options
+ const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
+ ptr += Pkt4::DHCPV4_PKT_HDR_LEN + sizeof(DHCP_OPTIONS_COOKIE); // rewind to end of fixed part
+ EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+ EXPECT_EQ(DHO_END, static_cast<uint8_t>(*(ptr + sizeof(v4Opts))));
+
+ EXPECT_NO_THROW(
+ delete pkt;
+ );
+}
+
+TEST(Pkt4Test, unpackOptions) {
+
+ vector<uint8_t> expectedFormat = generateTestPacket2();
+
+ expectedFormat.push_back(0x63);
+ expectedFormat.push_back(0x82);
+ expectedFormat.push_back(0x53);
+ expectedFormat.push_back(0x63);
+
+ for (int i = 0; i < sizeof(v4Opts); i++) {
+ expectedFormat.push_back(v4Opts[i]);
+ }
+
+ // now expectedFormat contains the fixed format and the options from v4Opts
+
+ boost::shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+ expectedFormat.size()));
+
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+
+ boost::shared_ptr<Option> x = pkt->getOption(12);
+ ASSERT_TRUE(x); // option 12 should exist
+ EXPECT_EQ(12, x->getType()); // this should be option 12
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = pkt->getOption(13);
+ ASSERT_TRUE(x); // option 13 should exist
+ EXPECT_EQ(13, x->getType()); // this should be option 13
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = pkt->getOption(14);
+ ASSERT_TRUE(x); // option 14 should exist
+ EXPECT_EQ(14, x->getType()); // this should be option 14
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = pkt->getOption(128);
+ ASSERT_TRUE(x); // option 128 should exist
+ EXPECT_EQ(128, x->getType()); // this should be option 128
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+20, 3)); // data len=3
+
+ x = pkt->getOption(254);
+ ASSERT_TRUE(x); // option 254 should exist
+ EXPECT_EQ(254, x->getType()); // this should be option 254
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+25, 3)); // data len=3
+}
+
+// This test verifies methods that are used for manipulating meta fields
+// i.e. fields that are not part of DHCPv4 (e.g. interface name).
+TEST(Pkt4Test, metaFields) {
+
+ Pkt4* pkt = new Pkt4(DHCPOFFER, 1234);
+ pkt->setIface("loooopback");
+ pkt->setIndex(42);
+ pkt->setRemoteAddr(IOAddress("1.2.3.4"));
+ pkt->setLocalAddr(IOAddress("4.3.2.1"));
+
+ EXPECT_EQ("loooopback", pkt->getIface());
+ EXPECT_EQ(42, pkt->getIndex());
+ EXPECT_EQ("1.2.3.4", pkt->getRemoteAddr().toText());
+ EXPECT_EQ("4.3.2.1", pkt->getLocalAddr().toText());
+
+ delete pkt;
+}
+
+TEST(Pkt4Test, Timestamp) {
+ scoped_ptr<Pkt4> pkt(new Pkt4(DHCPOFFER, 1234));
+
+ // Just after construction timestamp is invalid
+ ASSERT_TRUE(pkt->getTimestamp().is_not_a_date_time());
+
+ // Update packet time.
+ pkt->updateTimestamp();
+
+ // Get updated packet time.
+ boost::posix_time::ptime ts_packet = pkt->getTimestamp();
+
+ // After timestamp is updated it should be date-time.
+ ASSERT_FALSE(ts_packet.is_not_a_date_time());
+
+ // Check current time.
+ boost::posix_time::ptime ts_now =
+ boost::posix_time::microsec_clock::universal_time();
+
+ // Calculate period between packet time and now.
+ boost::posix_time::time_period ts_period(ts_packet, ts_now);
+
+ // Duration should be positive or zero.
+ EXPECT_TRUE(ts_period.length().total_microseconds() >= 0);
+}
+
+
+
} // end of anonymous namespace
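
The offsets hard-coded in the Pkt4 pack tests above (chaddr at 28, sname at 44, file at 108, a 236-byte fixed header, then the 4-byte cookie before the options) follow directly from the sizes of the fixed BOOTP fields. A small standalone check of that arithmetic follows (standard C++ only; the constant names are made up for illustration):

    // Standalone check of the fixed DHCPv4 offsets the pack tests rely on.
    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t kOpToGiaddr = 1 + 1 + 1 + 1   // op, htype, hlen, hops
                                      + 4               // transaction-id
                                      + 2 + 2           // secs, flags
                                      + 4 * 4;          // ciaddr..giaddr
        const std::size_t chaddrOffset = kOpToGiaddr;        // 28
        const std::size_t snameOffset  = chaddrOffset + 16;  // 44
        const std::size_t fileOffset   = snameOffset + 64;   // 108
        const std::size_t headerLen    = fileOffset + 128;   // 236

        assert(chaddrOffset == 28);
        assert(snameOffset == 44);
        assert(fileOffset == 108);
        assert(headerLen == 236);
        // The options section starts after the 4-byte magic cookie 63 82 53 63.
        assert(headerLen + 4 == 240);
        return 0;
    }
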
diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc
index c8ec049..cd6529a 100644
--- a/src/lib/dhcp/tests/pkt6_unittest.cc
+++ b/src/lib/dhcp/tests/pkt6_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -20,12 +20,13 @@
#else
#include <arpa/inet.h>
#endif
+#include <boost/date_time/posix_time/posix_time.hpp>
#include <gtest/gtest.h>
-#include "asiolink/io_address.h"
-#include "dhcp/option.h"
-#include "dhcp/pkt6.h"
-#include "dhcp/dhcp6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/dhcp6.h>
using namespace std;
using namespace isc;
@@ -41,56 +42,67 @@ public:
};
TEST_F(Pkt6Test, constructor) {
- Pkt6 * pkt1 = new Pkt6(17);
+ uint8_t data[] = { 0, 1, 2, 3, 4, 5 };
+ Pkt6 * pkt1 = new Pkt6(data, sizeof(data) );
- EXPECT_EQ(pkt1->data_len_, 17);
+ EXPECT_EQ(6, pkt1->getData().size());
+ EXPECT_EQ(0, memcmp( &pkt1->getData()[0], data, sizeof(data)));
delete pkt1;
}
-// captured actual SOLICIT packet: transid=0x3d79fb
-// options: client-id, in_na, dns-server, elapsed-time, option-request
-// this code is autogenerated (see src/bin/dhcp6/tests/iface_mgr_unittest.c)
-Pkt6 *capture1() {
+/// @brief returns captured actual SOLICIT packet
+///
+/// Captured SOLICIT packet with transid=0x3d79fb and options: client-id,
+/// in_na, dns-server, elapsed-time, option-request
+/// This code was autogenerated (see src/bin/dhcp6/tests/iface_mgr_unittest.c),
+/// but we spent some time to make it less ugly than it used to be.
+///
+/// @return pointer to Pkt6 that represents received SOLICIT
+Pkt6* capture1() {
Pkt6* pkt;
- pkt = new Pkt6(98);
- pkt->remote_port_ = 546;
- pkt->remote_addr_ = IOAddress("fe80::21e:8cff:fe9b:7349");
- pkt->local_port_ = 0;
- pkt->local_addr_ = IOAddress("ff02::1:2");
- pkt->ifindex_ = 2;
- pkt->iface_ = "eth0";
- pkt->data_[0]=1;
- pkt->data_[1]=01; pkt->data_[2]=02; pkt->data_[3]=03; pkt->data_[4]=0;
- pkt->data_[5]=1; pkt->data_[6]=0; pkt->data_[7]=14; pkt->data_[8]=0;
- pkt->data_[9]=1; pkt->data_[10]=0; pkt->data_[11]=1; pkt->data_[12]=21;
- pkt->data_[13]=158; pkt->data_[14]=60; pkt->data_[15]=22; pkt->data_[16]=0;
- pkt->data_[17]=30; pkt->data_[18]=140; pkt->data_[19]=155; pkt->data_[20]=115;
- pkt->data_[21]=73; pkt->data_[22]=0; pkt->data_[23]=3; pkt->data_[24]=0;
- pkt->data_[25]=40; pkt->data_[26]=0; pkt->data_[27]=0; pkt->data_[28]=0;
- pkt->data_[29]=1; pkt->data_[30]=255; pkt->data_[31]=255; pkt->data_[32]=255;
- pkt->data_[33]=255; pkt->data_[34]=255; pkt->data_[35]=255; pkt->data_[36]=255;
- pkt->data_[37]=255; pkt->data_[38]=0; pkt->data_[39]=5; pkt->data_[40]=0;
- pkt->data_[41]=24; pkt->data_[42]=32; pkt->data_[43]=1; pkt->data_[44]=13;
- pkt->data_[45]=184; pkt->data_[46]=0; pkt->data_[47]=1; pkt->data_[48]=0;
- pkt->data_[49]=0; pkt->data_[50]=0; pkt->data_[51]=0; pkt->data_[52]=0;
- pkt->data_[53]=0; pkt->data_[54]=0; pkt->data_[55]=0; pkt->data_[56]=18;
- pkt->data_[57]=52; pkt->data_[58]=255; pkt->data_[59]=255; pkt->data_[60]=255;
- pkt->data_[61]=255; pkt->data_[62]=255; pkt->data_[63]=255; pkt->data_[64]=255;
- pkt->data_[65]=255; pkt->data_[66]=0; pkt->data_[67]=23; pkt->data_[68]=0;
- pkt->data_[69]=16; pkt->data_[70]=32; pkt->data_[71]=1; pkt->data_[72]=13;
- pkt->data_[73]=184; pkt->data_[74]=0; pkt->data_[75]=1; pkt->data_[76]=0;
- pkt->data_[77]=0; pkt->data_[78]=0; pkt->data_[79]=0; pkt->data_[80]=0;
- pkt->data_[81]=0; pkt->data_[82]=0; pkt->data_[83]=0; pkt->data_[84]=221;
- pkt->data_[85]=221; pkt->data_[86]=0; pkt->data_[87]=8; pkt->data_[88]=0;
- pkt->data_[89]=2; pkt->data_[90]=0; pkt->data_[91]=100; pkt->data_[92]=0;
- pkt->data_[93]=6; pkt->data_[94]=0; pkt->data_[95]=2; pkt->data_[96]=0;
- pkt->data_[97]=23;
+ uint8_t data[98];
+ data[0] = 1;
+ data[1] = 1; data[2] = 2; data[3] = 3; data[4] = 0;
+ data[5] = 1; data[6] = 0; data[7] = 14; data[8] = 0;
+ data[9] = 1; data[10] = 0; data[11] = 1; data[12] = 21;
+ data[13] = 158; data[14] = 60; data[15] = 22; data[16] = 0;
+ data[17] = 30; data[18] = 140; data[19] = 155; data[20] = 115;
+ data[21] = 73; data[22] = 0; data[23] = 3; data[24] = 0;
+ data[25] = 40; data[26] = 0; data[27] = 0; data[28] = 0;
+ data[29] = 1; data[30] = 255; data[31] = 255; data[32] = 255;
+ data[33] = 255; data[34] = 255; data[35] = 255; data[36] = 255;
+ data[37] = 255; data[38] = 0; data[39] = 5; data[40] = 0;
+ data[41] = 24; data[42] = 32; data[43] = 1; data[44] = 13;
+ data[45] = 184; data[46] = 0; data[47] = 1; data[48] = 0;
+ data[49] = 0; data[50] = 0; data[51] = 0; data[52] = 0;
+ data[53] = 0; data[54] = 0; data[55] = 0; data[56] = 18;
+ data[57] = 52; data[58] = 255; data[59] = 255; data[60] = 255;
+ data[61] = 255; data[62] = 255; data[63] = 255; data[64] = 255;
+ data[65] = 255; data[66] = 0; data[67] = 23; data[68] = 0;
+ data[69] = 16; data[70] = 32; data[71] = 1; data[72] = 13;
+ data[73] = 184; data[74] = 0; data[75] = 1; data[76] = 0;
+ data[77] = 0; data[78] = 0; data[79] = 0; data[80] = 0;
+ data[81] = 0; data[82] = 0; data[83] = 0; data[84] = 221;
+ data[85] = 221; data[86] = 0; data[87] = 8; data[88] = 0;
+ data[89] = 2; data[90] = 0; data[91] = 100; data[92] = 0;
+ data[93] = 6; data[94] = 0; data[95] = 2; data[96] = 0;
+ data[97] = 23;
+
+ pkt = new Pkt6(data, sizeof(data));
+ pkt->setRemotePort(546);
+ pkt->setRemoteAddr(IOAddress("fe80::21e:8cff:fe9b:7349"));
+ pkt->setLocalPort(0);
+ pkt->setLocalAddr(IOAddress("ff02::1:2"));
+ pkt->setIndex(2);
+ pkt->setIface("eth0");
+
return (pkt);
}
+
TEST_F(Pkt6Test, unpack_solicit1) {
- Pkt6 * sol = capture1();
+ Pkt6* sol = capture1();
ASSERT_EQ(true, sol->unpack());
@@ -112,20 +124,16 @@ TEST_F(Pkt6Test, unpack_solicit1) {
EXPECT_FALSE(sol->getOption(D6O_IA_TA));
EXPECT_FALSE(sol->getOption(D6O_IAADDR));
- std::cout << sol->toText();
-
delete sol;
}
TEST_F(Pkt6Test, packUnpack) {
- Pkt6 * parent = new Pkt6(100);
-
- parent->setType(DHCPV6_SOLICIT);
+ Pkt6* parent = new Pkt6(DHCPV6_SOLICIT, 0x020304);
- boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
- boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
- boost::shared_ptr<Option> opt3(new Option(Option::V6, 100));
+ OptionPtr opt1(new Option(Option::V6, 1));
+ OptionPtr opt2(new Option(Option::V6, 2));
+ OptionPtr opt3(new Option(Option::V6, 100));
// let's not use zero-length option type 3 as it is IA_NA
parent->addOption(opt1);
@@ -133,47 +141,42 @@ TEST_F(Pkt6Test, packUnpack) {
parent->addOption(opt3);
EXPECT_EQ(DHCPV6_SOLICIT, parent->getType());
- int transid = parent->getTransid();
- // transaction-id was randomized, let's remember it
// calculated length should be 16
- EXPECT_EQ( Pkt6::DHCPV6_PKT_HDR_LEN + 3*Option::OPTION6_HDR_LEN,
- parent->len() );
+ EXPECT_EQ(Pkt6::DHCPV6_PKT_HDR_LEN + 3 * Option::OPTION6_HDR_LEN,
+ parent->len());
- EXPECT_TRUE( parent->pack() );
+ EXPECT_TRUE(parent->pack());
- //
- EXPECT_EQ( Pkt6::DHCPV6_PKT_HDR_LEN + 3*Option::OPTION6_HDR_LEN,
- parent->len() );
+ EXPECT_EQ(Pkt6::DHCPV6_PKT_HDR_LEN + 3 * Option::OPTION6_HDR_LEN,
+ parent->len());
- // let's delete options from options_ collection
- // they still be defined in packed
- parent->options_.clear();
-
- // that that removed options are indeed are gone
- EXPECT_EQ( 4, parent->len() );
+ // Create a second packet based on the assembled data from the first one
+ Pkt6* clone = new Pkt6(static_cast<const uint8_t*>(parent->getBuffer().getData()),
+ parent->getBuffer().getLength());
// now recreate options list
- EXPECT_TRUE( parent->unpack() );
+ EXPECT_TRUE(clone->unpack());
// transid, message-type should be the same as before
- EXPECT_EQ(transid, parent->getTransid());
- EXPECT_EQ(DHCPV6_SOLICIT, parent->getType());
-
- EXPECT_TRUE( parent->getOption(1));
- EXPECT_TRUE( parent->getOption(2));
- EXPECT_TRUE( parent->getOption(100));
- EXPECT_FALSE( parent->getOption(4));
-
+ EXPECT_EQ(parent->getTransid(), clone->getTransid());
+ EXPECT_EQ(DHCPV6_SOLICIT, clone->getType());
+
+ EXPECT_TRUE(clone->getOption(1));
+ EXPECT_TRUE(clone->getOption(2));
+ EXPECT_TRUE(clone->getOption(100));
+ EXPECT_FALSE(clone->getOption(4));
+
delete parent;
+ delete clone;
}
TEST_F(Pkt6Test, addGetDelOptions) {
- Pkt6 * parent = new Pkt6(100);
+ Pkt6* parent = new Pkt6(DHCPV6_SOLICIT, random());
- boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
- boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
- boost::shared_ptr<Option> opt3(new Option(Option::V6, 2));
+ OptionPtr opt1(new Option(Option::V6, 1));
+ OptionPtr opt2(new Option(Option::V6, 2));
+ OptionPtr opt3(new Option(Option::V6, 2));
parent->addOption(opt1);
parent->addOption(opt2);
@@ -183,7 +186,7 @@ TEST_F(Pkt6Test, addGetDelOptions) {
EXPECT_EQ(opt2, parent->getOption(2));
// expect NULL
- EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(4));
+ EXPECT_EQ(OptionPtr(), parent->getOption(4));
// now there are 2 options of type 2
parent->addOption(opt3);
@@ -192,13 +195,13 @@ TEST_F(Pkt6Test, addGetDelOptions) {
EXPECT_EQ(true, parent->delOption(2));
// there still should be the other option 2
- EXPECT_NE(boost::shared_ptr<Option>(), parent->getOption(2));
+ EXPECT_NE(OptionPtr(), parent->getOption(2));
// let's delete the other option 2
EXPECT_EQ(true, parent->delOption(2));
// no more options with type=2
- EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(2));
+ EXPECT_EQ(OptionPtr(), parent->getOption(2));
// let's try to delete - should fail
EXPECT_TRUE(false == parent->delOption(2));
@@ -206,5 +209,30 @@ TEST_F(Pkt6Test, addGetDelOptions) {
delete parent;
}
+TEST_F(Pkt6Test, Timestamp) {
+ boost::scoped_ptr<Pkt6> pkt(new Pkt6(DHCPV6_SOLICIT, 0x020304));
+
+ // Just after construction the timestamp is invalid.
+ ASSERT_TRUE(pkt->getTimestamp().is_not_a_date_time());
+
+ // Update packet time.
+ pkt->updateTimestamp();
+
+ // Get updated packet time.
+ boost::posix_time::ptime ts_packet = pkt->getTimestamp();
+
+ // After the timestamp is updated it should be a valid date-time.
+ ASSERT_FALSE(ts_packet.is_not_a_date_time());
+
+ // Check current time.
+ boost::posix_time::ptime ts_now =
+ boost::posix_time::microsec_clock::universal_time();
+
+ // Calculate period between packet time and now.
+ boost::posix_time::time_period ts_period(ts_packet, ts_now);
+
+ // Duration should be positive or zero.
+ EXPECT_TRUE(ts_period.length().total_microseconds() >= 0);
+}
}
diff --git a/src/lib/python/.gitignore b/src/lib/python/.gitignore
new file mode 100644
index 0000000..9252d05
--- /dev/null
+++ b/src/lib/python/.gitignore
@@ -0,0 +1 @@
+/bind10_config.py
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index 5924294..e3ae4b5 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -1,16 +1,9 @@
SUBDIRS = isc
-python_PYTHON = bind10_config.py
+nodist_python_PYTHON = bind10_config.py
pythondir = $(pyexecdir)
-# Explicitly define DIST_COMMON so ${python_PYTHON} is not included
-# as we don't want the generated file included in distributed tarfile.
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in bind10_config.py.in
-
-# When setting DIST_COMMON, then need to add the .in file too.
-EXTRA_DIST = bind10_config.py.in
-
-CLEANFILES = bind10_config.pyc
+CLEANFILES = bind10_config.pyc bind10_config.pyo
CLEANDIRS = __pycache__
clean-local:
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index 69b17ed..b8975cf 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -23,20 +23,37 @@ def reload():
global DATA_PATH
global PLUGIN_PATHS
global PREFIX
+ global LIBEXECPATH
BIND10_MSGQ_SOCKET_FILE = os.path.join("@localstatedir@",
"@PACKAGE_NAME@",
"msgq_socket").replace("${prefix}",
"@prefix@")
PREFIX = "@prefix@"
- # If B10_FROM_SOURCE is set in the environment, we use data files
- # from a directory relative to the value of that variable, or, if defined,
- # relative to the value of B10_FROM_SOURCE_LOCALSTATEDIR. Otherwise
- # we use the ones installed on the system.
+ # B10_FROM_SOURCE is set in the environment for internal tests and
+ # an experimental run without installation. In that case we need to
+ # specialize some configuration variables, generally so that they refer
+ # to somewhere in the source tree instead of the appropriate places
+ # after installation.
+ #
+ # DATA_PATH: used by the config manager to find configuration files.
+ # When "FROM_SOURCE", we use data files from a directory relative to the
+ # value of that variable, or, if defined, relative to the value of
+ # B10_FROM_SOURCE_LOCALSTATEDIR. Otherwise we use the ones installed on
+ # the system.
+ # PLUGIN_PATHS: configuration modules that are not associated with a
+ # specific process
+ # LIBEXECPATH: Paths to programs invoked by the boss process
+ # The boss process (directly or via a helper module) uses this as
+ # the preferred PATH before starting a child process.
+ # When "FROM_SOURCE", it lists the directories where the programs are
+ # built so that when BIND 10 is experimentally started on the source
+ # tree the programs in the tree (not installed ones) will be used.
+ #
# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
# tests where we want to use various types of configuration within the test
- # environment. (We may want to make it even more generic so that the path is
- # passed from the boss process)
+ # environment. (We may want to make it even more generic so that the path
+ # is passed from the boss process)
if "B10_FROM_SOURCE" in os.environ:
if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
@@ -44,9 +61,17 @@ def reload():
DATA_PATH = os.environ["B10_FROM_SOURCE"]
PLUGIN_PATHS = [os.environ["B10_FROM_SOURCE"] +
'/src/bin/cfgmgr/plugins']
+ programdirs = ['auth', 'cfgmgr', 'cmdctl', 'ddns', 'dhcp6', 'msgq',
+ 'resolver', 'sockcreator', 'stats', 'xfrin', 'xfrout',
+ 'zonemgr']
+ LIBEXECPATH = ':'.join(['@abs_top_builddir@/src/bin/' + p for p in
+ programdirs])
else:
DATA_PATH = "@localstatedir@/@PACKAGE@".replace("${prefix}", PREFIX)
PLUGIN_PATHS = ["@prefix@/share/@PACKAGE@/config_plugins"]
+ LIBEXECPATH = ("@libexecdir@/@PACKAGE@"). \
+ replace("${exec_prefix}", "@exec_prefix@"). \
+ replace("${prefix}", "@prefix@")
# For testing the plugins so they can find their own spec files
if "B10_TEST_PLUGIN_DIR" in os.environ:
PLUGIN_PATHS = os.environ["B10_TEST_PLUGIN_DIR"].split(':')
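The LIBEXECPATH logic added above either joins the per-program build directories (when B10_FROM_SOURCE is set) or falls back to the installed libexec directory. A minimal standalone sketch of the same path-joining idea, with made-up directory constants standing in for the autoconf substitutions (@abs_top_builddir@, @libexecdir@, @PACKAGE@):

    import os

    # Hypothetical stand-ins for the autoconf substitutions; the real values
    # are filled in when bind10_config.py is generated from the .in template.
    ABS_TOP_BUILDDIR = "/home/user/bind10/build"
    INSTALLED_LIBEXEC = "/usr/local/libexec/bind10-devel"

    def compute_libexecpath(environ):
        """Return the PATH prefix the boss would prefer for child programs."""
        if "B10_FROM_SOURCE" in environ:
            # Running from the source tree: list every per-program build
            # directory so the freshly built binaries are found first.
            programdirs = ['auth', 'cfgmgr', 'cmdctl', 'ddns', 'dhcp6', 'msgq',
                           'resolver', 'sockcreator', 'stats', 'xfrin',
                           'xfrout', 'zonemgr']
            return ':'.join(ABS_TOP_BUILDDIR + '/src/bin/' + p
                            for p in programdirs)
        # Installed case: a single libexec directory is enough.
        return INSTALLED_LIBEXEC

    print(compute_libexecpath({"B10_FROM_SOURCE": "/home/user/bind10"}))
    print(compute_libexecpath(os.environ))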
diff --git a/src/lib/python/bind10_config.py.win32 b/src/lib/python/bind10_config.py.win32
index d239044..4a0679e 100644
--- a/src/lib/python/bind10_config.py.win32
+++ b/src/lib/python/bind10_config.py.win32
@@ -23,20 +23,37 @@ def reload():
global DATA_PATH
global PLUGIN_PATHS
global PREFIX
+ global LIBEXECPATH
BIND10_MSGQ_SOCKET_FILE = os.path.join("c:/Temp",
"bind10-devel",
"msgq_socket").replace("${prefix}",
"/Temp")
PREFIX = "/Temp"
- # If B10_FROM_SOURCE is set in the environment, we use data files
- # from a directory relative to the value of that variable, or, if defined,
- # relative to the value of B10_FROM_SOURCE_LOCALSTATEDIR. Otherwise
- # we use the ones installed on the system.
+ # B10_FROM_SOURCE is set in the environment for internal tests and
+ # an experimental run without installation. In that case we need to
+ # specialize some configuration variables, generally so that they refer
+ # to somewhere in the source tree instead of the appropriate places
+ # after installation.
+ #
+ # DATA_PATH: used by the config manager to find configuration files.
+ # When "FROM_SOURCE", we use data files from a directory relative to the
+ # value of that variable, or, if defined, relative to the value of
+ # B10_FROM_SOURCE_LOCALSTATEDIR. Otherwise we use the ones installed on
+ # the system.
+ # PLUGIN_PATHS: configuration modules that are not associated with a
+ # specific process
+ # LIBEXECPATH: Paths to programs invoked by the boss process
+ # The boss process (directly or via a helper module) uses this as
+ # the preferred PATH before starting a child process.
+ # When "FROM_SOURCE", it lists the directories where the programs are
+ # built so that when BIND 10 is experimentally started on the source
+ # tree the programs in the tree (not installed ones) will be used.
+ #
# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
# tests where we want to use various types of configuration within the test
- # environment. (We may want to make it even more generic so that the path is
- # passed from the boss process)
+ # environment. (We may want to make it even more generic so that the path
+ # is passed from the boss process)
if "B10_FROM_SOURCE" in os.environ:
if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
@@ -44,9 +61,18 @@ def reload():
DATA_PATH = os.environ["B10_FROM_SOURCE"]
PLUGIN_PATHS = [os.environ["B10_FROM_SOURCE"] +
'/src/bin/cfgmgr/plugins']
+ programdirs = ['auth', 'cfgmgr', 'cmdctl', 'ddns', 'dhcp6', 'msgq',
+ 'resolver', 'sockcreator', 'stats', 'xfrin', 'xfrout',
+ 'zonemgr']
+ BIND10HOME = os.environ["BIND10HOME"]
+ LIBEXECPATH = ':'.join([BIND10HOME + '/src/bin/' + p for p in
+ programdirs])
else:
DATA_PATH = "c:/Temp/bind10-devel".replace("${prefix}", PREFIX)
PLUGIN_PATHS = ["/Temp/share/bind10-devel/config_plugins"]
+ LIBEXECPATH = ("@libexecdir@/@PACKAGE@"). \
+ replace("${exec_prefix}", "@exec_prefix@"). \
+ replace("${prefix}", "@prefix@")
# For testing the plugins so they can find their own spec files
if "B10_TEST_PLUGIN_DIR" in os.environ:
PLUGIN_PATHS = os.environ["B10_TEST_PLUGIN_DIR"].split(':')
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index a3e74c5..80fd222 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,5 +1,5 @@
SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
-SUBDIRS += xfrin log_messages
+SUBDIRS += xfrin log_messages server_common ddns
python_PYTHON = __init__.py
diff --git a/src/lib/python/isc/acl/Makefile.am b/src/lib/python/isc/acl/Makefile.am
index b1afa15..b9a0c81 100644
--- a/src/lib/python/isc/acl/Makefile.am
+++ b/src/lib/python/isc/acl/Makefile.am
@@ -26,11 +26,11 @@ _dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
# Python prefers .so, while some OSes (specifically MacOS) use a different
# suffix for dynamic objects. -module is necessary to work this around.
-acl_la_LDFLAGS += -module
+acl_la_LDFLAGS += -module -avoid-version
acl_la_LIBADD = $(top_builddir)/src/lib/acl/libacl.la
acl_la_LIBADD += $(PYTHON_LIB)
-_dns_la_LDFLAGS += -module
+_dns_la_LDFLAGS += -module -avoid-version
_dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
_dns_la_LIBADD += $(PYTHON_LIB)
diff --git a/src/lib/python/isc/acl/tests/dns_test.py b/src/lib/python/isc/acl/tests/dns_test.py
index 7ee3023..d225bee 100644
--- a/src/lib/python/isc/acl/tests/dns_test.py
+++ b/src/lib/python/isc/acl/tests/dns_test.py
@@ -321,7 +321,7 @@ class RequestACLTest(unittest.TestCase):
' "from": "192.0.2.0/24"},' +
' {"action": "DROP",' +
' "from": "2001:db8::1"},' +
- '] }')
+ ']')
self.assertEqual(ACCEPT, acl.execute(CONTEXT4))
self.assertEqual(REJECT, acl.execute(get_context('192.0.2.2')))
self.assertEqual(DROP, acl.execute(get_context('2001:db8::1')))
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index 43a7605..aa5d0ab 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -1,4 +1,5 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py sockcreator.py
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
+ socket_cache.py
pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
new file mode 100644
index 0000000..da2730c
--- /dev/null
+++ b/src/lib/python/isc/bind10/component.py
@@ -0,0 +1,673 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Module for managing components (an abstraction of a process). It allows
+starting them in a given order, handling crashes (what happens depends on the
+kind of component) and shutting down. It also handles the configuration of this.
+
+Dependencies between them are not yet handled. It might turn out they are
+needed; in that case they will be added sometime in the future.
+
+This framework allows for a single process to be started multiple times (by
+specifying multiple components with the same configuration). We might want
+to add more convenient support (like providing a count argument to the
+configuration). This is yet to be designed.
+"""
+
+import isc.log
+from isc.log_messages.bind10_messages import *
+import time
+import os
+import signal
+
+logger = isc.log.Logger("boss")
+DBG_TRACE_DATA = 20
+DBG_TRACE_DETAILED = 80
+
+START_CMD = 'start'
+STOP_CMD = 'stop'
+
+STARTED_OK_TIME = 10
+COMPONENT_RESTART_DELAY = 10
+
+STATE_DEAD = 'dead'
+STATE_STOPPED = 'stopped'
+STATE_RUNNING = 'running'
+
+def get_signame(signal_number):
+ """Return the symbolic name for a signal."""
+ for sig in dir(signal):
+ if sig.startswith("SIG") and sig[3].isalnum():
+ if getattr(signal, sig) == signal_number:
+ return sig
+ return "unknown signal"
+
+class BaseComponent:
+ """
+ This represents a single component. This one is an abstract base class.
+ There are some methods which should be left untouched, but there are
+ others which define the interface only and should be overridden in
+ concrete implementations.
+
+ The component is in one of the three states:
+ - Stopped - it is either not started yet or it was explicitly stopped.
+ The component is created in this state (it must be asked to start
+ explicitly).
+ - Running - after start() was called, it started successfully and is
+ now running.
+ - Dead - it failed and cannot be resurrected.
+
+ Init
+ | stop()
+ | +-----------------------+
+ | | |
+ v | start() success |
+ Stopped --------+--------> Running <----------+
+ | | |
+ |failure | failed() |
+ | | |
+ v | |
+ +<-----------+ |
+ | |
+ | kind == dispensable or kind|== needed and failed late
+ +-----------------------------+
+ |
+ | kind == core or kind == needed and it failed too soon
+ v
+ Dead
+
+ Note that there are still situations which are not handled properly here.
+ We don't recognize a component that is starting up, but not ready yet, one
+ that is already shutting down, impossible to stop, etc. We need to add more
+ states in future to handle it properly.
+ """
+ def __init__(self, boss, kind):
+ """
+ Creates the component in the stopped (not running) state.
+
+ The parameters are:
+ - `boss` the boss object to plug into. The component needs to plug
+ into it to know when it failed, etc.
+ - `kind` is the kind of component. It may be one of:
+ * 'core' means the system can't run without it and it can't be
+ safely restarted. If it does not start, the system is brought
+ down. If it crashes, the system is turned off as well (with
+ non-zero exit status).
+ * 'needed' means the system is able to restart the component,
+ but it is vital part of the service (like auth server). If
+ it fails to start or crashes in less than 10s after the first
+ startup, the system is brought down. If it crashes later on,
+ it is restarted (see below).
+ * 'dispensable' means the component should be running, but if it
+ doesn't start or crashes for some reason, the system simply tries
+ to restart it and keeps running.
+
+ For components that are restarted, the restarts are not always
+ immediate; if the component has run for more than
+ COMPONENT_RESTART_DELAY (10) seconds, it is restarted right
+ away. If the component has not run that long, the system waits
+ until that time has passed (since the last start) before the
+ component is restarted.
+
+ Note that the __init__ method of child class should have these
+ parameters:
+
+ __init__(self, process, boss, kind, address=None, params=None)
+
+ The extra parameters are:
+ - `process` - which program should be started.
+ - `address` - the address on message bus, used to talk to the
+ component.
+ - `params` - parameters to the program.
+
+ The methods you should not override are:
+ - start
+ - stop
+ - failed
+ - running
+
+ You should override:
+ - _start_internal
+ - _stop_internal
+ - _failed_internal (if you like, the empty default might be suitable)
+ - name
+ - pid
+ - kill
+ """
+ if kind not in ['core', 'needed', 'dispensable']:
+ raise ValueError('Component kind can not be ' + kind)
+ self.__state = STATE_STOPPED
+ self._kind = kind
+ self._boss = boss
+ self._original_start_time = None
+
+ def start(self):
+ """
+ Start the component for the first time or restart it. It runs
+ _start_internal to actually start the component.
+
+ If you try to start an already running component, it raises ValueError.
+ """
+ if self.__state == STATE_DEAD:
+ raise ValueError("Can't resurrect already dead component")
+ if self.running():
+ raise ValueError("Can't start already running component")
+ logger.info(BIND10_COMPONENT_START, self.name())
+ self.__state = STATE_RUNNING
+ self.__start_time = time.time()
+ if self._original_start_time is None:
+ self._original_start_time = self.__start_time
+ self._restart_time = None
+ try:
+ self._start_internal()
+ except Exception as e:
+ logger.error(BIND10_COMPONENT_START_EXCEPTION, self.name(), e)
+ self.failed(None)
+ raise
+
+ def stop(self):
+ """
+ Stop the component. It calls _stop_internal to do the actual
+ stopping.
+
+ If you try to stop a component that is not running, it raises
+ ValueError.
+ """
+ # This is not tested. It talks with the outside world, which is out
+ # of the scope of unit tests.
+ if not self.running():
+ raise ValueError("Can't stop a component which is not running")
+ logger.info(BIND10_COMPONENT_STOP, self.name())
+ self.__state = STATE_STOPPED
+ self._stop_internal()
+
+ def failed(self, exit_code):
+ """
+ Notify the component it crashed. This will be called from boss object.
+
+ If you try to call failed on a component that is not running,
+ a ValueError is raised.
+
+ If it is a core component or a needed component and it was started only
+ recently, the component will become dead and will ask the boss to shut
+ down with an error exit status. A dead component can't be started again.
+
+ Otherwise the component will try to restart.
+
+ The exit code is used for logging. It might be None.
+
+ It calls _failed_internal internally.
+
+ Returns True if the process was immediately restarted, returns
+ False if the process was not restarted, either because
+ it is considered a core or needed component, or because
+ the component is to be restarted later.
+ """
+
+ if exit_code is not None:
+ if os.WIFEXITED(exit_code):
+ exit_str = "process exited normally with exit status %d" % (exit_code)
+ elif os.WIFSIGNALED(exit_code):
+ sig = os.WTERMSIG(exit_code)
+ signame = get_signame(sig)
+ if os.WCOREDUMP(exit_code):
+ exit_str = "process dumped core with exit status %d (killed by signal %d: %s)" % (exit_code, sig, signame)
+ else:
+ exit_str = "process terminated with exit status %d (killed by signal %d: %s)" % (exit_code, sig, signame)
+ else:
+ exit_str = "unknown condition with exit status %d" % (exit_code)
+ else:
+ exit_str = "unknown condition"
+
+ logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
+ exit_str)
+ if not self.running():
+ raise ValueError("Can't fail component that isn't running")
+ self.__state = STATE_STOPPED
+ self._failed_internal()
+ # If it is a core component, or a needed component that failed to start
+ # (including the case where it stopped really soon)
+ if self._kind == 'core' or \
+ (self._kind == 'needed' and time.time() - STARTED_OK_TIME <
+ self._original_start_time):
+ self.__state = STATE_DEAD
+ logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
+ self._boss.component_shutdown(1)
+ return False
+ # This means we want to restart
+ else:
+ # if the component was only running for a short time, don't
+ # restart right away, but set the time at which it wants to be restarted,
+ # and return that it wants to be restarted later
+ self.set_restart_time()
+ return self.restart()
+
+ def set_restart_time(self):
+ """Calculates and sets the time this component should be restarted.
+ Currently, it uses a very basic algorithm: start time +
+ COMPONENT_RESTART_DELAY (10 seconds). This algorithm may be improved upon
+ in the future.
+ """
+ self._restart_at = self.__start_time + COMPONENT_RESTART_DELAY
+
+ def get_restart_time(self):
+ """Returns the time at which this component should be restarted."""
+ return self._restart_at
+
+ def restart(self, now = None):
+ """Restarts the component if it has a restart_time and if the value
+ of the restart_time is smaller than 'now'.
+
+ If the parameter 'now' is given, its value will be used instead
+ of calling time.time().
+
+ Returns True if the component is restarted, False if not."""
+ if now is None:
+ now = time.time()
+ if self.get_restart_time() is not None and\
+ self.get_restart_time() < now:
+ self.start()
+ return True
+ else:
+ return False
+
+ def running(self):
+ """
+ Informs if the component is currently running. It assumes that failed()
+ is called whenever the component really fails, and there might be some
+ time between the actual failure and the call, so this might be
+ inaccurate (it corresponds to what the object thinks is true, not
+ to the real "external" state).
+
+ It is not expected for this method to be overridden.
+ """
+ return self.__state == STATE_RUNNING
+
+ def _start_internal(self):
+ """
+ This method does the actual starting of a process. You need to override
+ this method to do the actual starting.
+
+ The ability to override this method presents some flexibility. It
+ allows processes to be started in unusual ways, as well as components that
+ have no processes at all or components with multiple processes (in the case
+ of multiple processes, care should be taken to keep their
+ started/stopped state in sync and all the processes that can fail
+ should be registered).
+
+ You should register all the processes created by calling
+ self._boss.register_process.
+ """
+ pass
+
+ def _stop_internal(self):
+ """
+ This is the method that does the actual stopping of a component.
+ You need to provide it in a concrete implementation.
+
+ Also, note that it is a bad idea to raise exceptions from here.
+ Under such circumstance, the component will be considered stopped,
+ and the exception propagated, but we can't be sure it really is
+ dead.
+ """
+ pass
+
+ def _failed_internal(self):
+ """
+ This method is called from failed. You can replace it if you need
+ some specific behaviour when the component crashes. The default
+ implementation is empty.
+
+ Do not raise exceptions from here, please. The proper shutdown
+ would not happen otherwise.
+ """
+ pass
+
+ def name(self):
+ """
+ Provides human readable name of the component, for logging and similar
+ purposes.
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+ def pid(self):
+ """
+ Provides the PID of the process, if the component is a real running process.
+ This may return None in cases when there's no process involved with the
+ component or when the component has not been started yet.
+
+ However, it is expected the component preserves the pid after it was
+ stopped, to ensure we can log it when we ask it to be killed (in case
+ the process refused to stop willingly).
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+ def kill(self, forceful=False):
+ """
+ Kills the component.
+
+ If forceful is true, it should do it in a more direct and aggressive way
+ (for example by using SIGKILL or some equivalent). If it is false, a more
+ peaceful way should be used (SIGTERM or equivalent).
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+class Component(BaseComponent):
+ """
+ The most common implementation of a component. It can be used either
+ directly, in which case it will just start the process without anything
+ special, or slightly customised by passing a start_func hook to the __init__
+ to change the way it starts.
+
+ If such customisation isn't enough, you should inherit BaseComponent
+ directly. It is not recommended to override methods of this class
+ on one-by-one basis.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None,
+ start_func=None):
+ """
+ Creates the component in the stopped (not running) state.
+
+ The parameters are:
+ - `process` is the name of the process to start.
+ - `boss` the boss object to plug into. The component needs to plug
+ into it to know when it failed, etc.
+ - `kind` is the kind of component. Refer to the documentation of
+ BaseComponent for details.
+ - `address` is the address on message bus. It is used to ask it to
+ shut down at the end. If you specialize the class for a component
+ that is shut down differently, it might be None.
+ - `params` is a list of parameters to pass to the process when it
+ starts. It is currently unused and this support is left out for
+ now.
+ - `start_func` is a function called when it is started. It is supposed
+ to start up the process and return a ProcInfo object describing it.
+ There's a sensible default if not provided, which just launches
+ the program without any special care.
+ """
+ BaseComponent.__init__(self, boss, kind)
+ self._process = process
+ self._start_func = start_func
+ self._address = address
+ self._params = params
+ self._procinfo = None
+
+ def _start_internal(self):
+ """
+ You can change the "core" of this function by setting self._start_func
+ to a function without parameters. Such function should start the
+ process and return the procinfo object describing the running process.
+
+ If you don't provide the _start_func, the usual startup by calling
+ boss.start_simple is performed.
+ """
+ # This one is not tested. For one, it starts a real process
+ # which is out of scope of unit tests, for another, it just
+ # delegates the starting to other function in boss (if a derived
+ # class does not provide an override function), which is tested
+ # by use.
+ if self._start_func is not None:
+ procinfo = self._start_func()
+ else:
+ # TODO Handle params, etc
+ procinfo = self._boss.start_simple(self._process)
+ self._procinfo = procinfo
+ self._boss.register_process(self.pid(), self)
+
+ def _stop_internal(self):
+ self._boss.stop_process(self._process, self._address, self.pid())
+ # TODO Some way to wait for the process that doesn't want to
+ # terminate and kill it would prove nice (or add it to boss somewhere?)
+
+ def name(self):
+ """
+ Returns the name, derived from the process name.
+ """
+ return self._process
+
+ def pid(self):
+ return self._procinfo.pid if self._procinfo is not None else None
+
+ def kill(self, forceful=False):
+ if self._procinfo is not None:
+ if forceful:
+ self._procinfo.process.kill()
+ else:
+ self._procinfo.process.terminate()
+
+class Configurator:
+ """
+ This thing keeps track of configuration changes and starts and stops
+ components as it goes. It also handles the initial startup and final
+ shutdown.
+
+ Note that this will allow you to stop (by invoking reconfigure) a core
+ component. There should be some kind of layer protecting users from ever
+ doing so (users must not stop the config manager, message queue and stuff
+ like that or the system won't start again). However, if a user specifies
+ b10-auth as core, it is safe to stop that one.
+
+ The parameters are:
+ * `boss`: The boss we are managing for.
+ * `specials`: Dict of specially started components. Each item is a class
+ representing the component.
+
+ The configuration passed to it (by startup() and reconfigure()) is a
+ dictionary, each item represents one component that should be running.
+ The key is a unique identifier used to reference the component. The
+ value is a dictionary describing the component. All items in the
+ description are optional unless told otherwise, and they are as follows:
+ * `special` - Some components are started in a special way. If it is
+ present, it specifies which class from the specials parameter should
+ be used to create the component. In that case, some of the following
+ items might be irrelevant, depending on the special component chosen.
+ If it is not there, the basic Component class is used.
+ * `process` - Name of the executable to start. If it is not present,
+ it defaults to the identifier of the component.
+ * `kind` - The kind of component, one of 'core', 'needed' and
+ 'dispensable'. This specifies what happens if the component fails.
+ This one is required.
+ * `address` - The address of the component on message bus. It is used
+ to shut down the component. All special components currently either
+ know their own address or don't need one and ignore it. The common
+ components should provide this.
+ * `params` - The command line parameters of the executable. Defaults
+ to no parameters. It is currently unused.
+ * `priority` - When starting the component, the components with higher
+ priority are started before the ones with lower priority. If it is
+ not present, it defaults to 0.
+ """
+ def __init__(self, boss, specials = {}):
+ """
+ Initializes the configurator, but nothing is started yet.
+
+ The boss parameter is the boss object used to start and stop processes.
+ """
+ self.__boss = boss
+ # These could be __private, but as we access them from within unittest,
+ # it's more comfortable to have them just _protected.
+
+ # They are tuples (configuration, component)
+ self._components = {}
+ self._running = False
+ self.__specials = specials
+
+ def __reconfigure_internal(self, old, new):
+ """
+ Does a switch from one configuration to another.
+ """
+ self._run_plan(self._build_plan(old, new))
+
+ def startup(self, configuration):
+ """
+ Starts the first set of processes. This configuration is expected
+ to be hardcoded from the boss itself to start the configuration
+ manager and other similar things.
+ """
+ if self._running:
+ raise ValueError("Trying to start the component configurator " +
+ "twice")
+ logger.info(BIND10_CONFIGURATOR_START)
+ self.__reconfigure_internal(self._components, configuration)
+ self._running = True
+
+ def shutdown(self):
+ """
+ Shuts everything down.
+
+ It is not expected that anyone would want to shut down and then start
+ the configurator again, so we don't explicitly make sure that would
+ work. However, we are not aware of anything that would make it not
+ work either.
+ """
+ if not self._running:
+ raise ValueError("Trying to shutdown the component " +
+ "configurator while it's not yet running")
+ logger.info(BIND10_CONFIGURATOR_STOP)
+ self._running = False
+ self.__reconfigure_internal(self._components, {})
+
+ def reconfigure(self, configuration):
+ """
+ Changes configuration from the current one to the provided. It
+ starts and stops all the components as needed (eg. if there's
+ a component that was not in the original configuration, it is
+ started, any component that was in the old and is not in the
+ new one is stopped).
+ """
+ if not self._running:
+ raise ValueError("Trying to reconfigure the component " +
+ "configurator while it's not yet running")
+ logger.info(BIND10_CONFIGURATOR_RECONFIGURE)
+ self.__reconfigure_internal(self._components, configuration)
+
+ def _build_plan(self, old, new):
+ """
+ Builds a plan for how to transfer from the old configuration to the new
+ one. It'll be sorted by priority and it will contain the components
+ (already created, but not started). Each command in the plan is a dict,
+ so it can be extended any time in future to include whatever
+ parameters each operation might need.
+
+ Any configuration problems are expected to be handled here, so the
+ plan is not yet run.
+ """
+ logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_BUILD, old, new)
+ plan = []
+ # Handle removals of old components
+ for cname in old.keys():
+ if cname not in new:
+ component = self._components[cname][1]
+ if component.running():
+ plan.append({
+ 'command': STOP_CMD,
+ 'component': component,
+ 'name': cname
+ })
+ # Handle transitions of configuration of what is here
+ for cname in new.keys():
+ if cname in old:
+ for option in ['special', 'process', 'kind', 'address',
+ 'params']:
+ if new[cname].get(option) != old[cname][0].get(option):
+ raise NotImplementedError('Changing configuration of' +
+ ' a running component is ' +
+ 'not yet supported. Remove' +
+ ' and re-add ' + cname +
+ ' to get the same effect')
+ # Handle introduction of new components
+ plan_add = []
+ for cname in new.keys():
+ if cname not in old:
+ component_config = new[cname]
+ creator = Component
+ if 'special' in component_config:
+ # TODO: Better error handling
+ creator = self.__specials[component_config['special']]
+ component = creator(component_config.get('process', cname),
+ self.__boss, component_config['kind'],
+ component_config.get('address'),
+ component_config.get('params'))
+ priority = component_config.get('priority', 0)
+ # We store tuples, priority first, so we can easily sort
+ plan_add.append((priority, {
+ 'component': component,
+ 'command': START_CMD,
+ 'name': cname,
+ 'config': component_config
+ }))
+ # Push the starts there sorted by priority
+ plan.extend([command for (_, command) in sorted(plan_add,
+ reverse=True,
+ key=lambda command:
+ command[0])])
+ return plan
+
+ def running(self):
+ """
+ Returns whether the configurator is running (i.e. it was started by
+ startup() and not yet stopped by shutdown()).
+ """
+ return self._running
+
+ def _run_plan(self, plan):
+ """
+ Run a plan, created beforehand by _build_plan.
+
+ With the start and stop commands, it also adds and removes components
+ in _components.
+
+ Currently implemented commands are:
+ * start
+ * stop
+
+ The plan is a list of tasks, each task is a dictionary. It must contain
+ at least 'component' (a component object to work with) and 'command'
+ (the command to perform). Currently, both existing commands need the 'name' of
+ the component as well (the identifier from configuration). The 'start'
+ one needs the 'config' to be there, which is the configuration description
+ of the component.
+ """
+ done = 0
+ try:
+ logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_RUN, len(plan))
+ for task in plan:
+ component = task['component']
+ command = task['command']
+ logger.debug(DBG_TRACE_DETAILED, BIND10_CONFIGURATOR_TASK,
+ command, component.name())
+ if command == START_CMD:
+ component.start()
+ self._components[task['name']] = (task['config'],
+ component)
+ elif command == STOP_CMD:
+ if component.running():
+ component.stop()
+ del self._components[task['name']]
+ else:
+ # Can Not Happen (as the plans are generated by ourselves).
+ # Therefore not tested.
+ raise NotImplementedError("Command unknown: " + command)
+ done += 1
+ except:
+ logger.error(BIND10_CONFIGURATOR_PLAN_INTERRUPTED, done, len(plan))
+ raise
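The Configurator docstring above defines the shape of the configuration dictionary (identifier -> description with 'special', 'process', 'kind', 'address', 'params' and 'priority') and how startup()/reconfigure()/shutdown() drive it. A minimal sketch of that call sequence, assuming a built BIND 10 tree on PYTHONPATH; the FakeBoss stub below only implements the handful of boss methods the components call and is not part of this commit:

    from isc.bind10.component import Configurator
    from isc.bind10.special_component import get_specials

    class FakeProcInfo:
        """Hypothetical stand-in for the boss's ProcInfo objects."""
        def __init__(self, pid):
            self.pid = pid

    class FakeBoss:
        """Hypothetical boss stub: just enough of the interface used by
        Component and the special components (the real boss lives in
        src/bin/bind10 and actually spawns processes)."""
        uid = None
        gid = None
        def __init__(self):
            self.__next_pid = 0
        def __alloc_pid(self):
            self.__next_pid += 1
            return self.__next_pid
        def start_simple(self, process):
            return FakeProcInfo(self.__alloc_pid())
        def start_msgq(self):
            return FakeProcInfo(self.__alloc_pid())
        def register_process(self, pid, component):
            pass
        def stop_process(self, process, address, pid):
            pass
        def component_shutdown(self, exitcode=0):
            pass

    configurator = Configurator(FakeBoss(), get_specials())
    # Higher priority components are started first; 'msgq' uses the special
    # Msgq class, 'b10-stats' is a plain Component.
    configurator.startup({
        'msgq': {'special': 'msgq', 'kind': 'core', 'priority': 199},
        'b10-stats': {'process': 'b10-stats', 'kind': 'dispensable'},
    })
    # Dropping an entry from the dictionary stops that component.
    configurator.reconfigure({
        'msgq': {'special': 'msgq', 'kind': 'core', 'priority': 199},
    })
    configurator.shutdown()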
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 8e5b019..c681d07 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -16,7 +16,9 @@
import socket
import struct
import os
+import copy
import subprocess
from isc.log_messages.bind10_messages import *
from libutil_io_python import recv_fd
@@ -200,6 +202,9 @@ class WrappedSocket:
class Creator(Parser):
"""
This starts the socket creator and allows asking for the sockets.
+
+ Note: __process shouldn't be reset once created. See the note
+ in the SockCreator class for details.
"""
def __init__(self, path):
(local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -207,15 +212,24 @@ class Creator(Parser):
# stdin as well as stdout, so we dup it before passing it there.
remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
socket.SOCK_STREAM)
- env = os.environ
+ env = copy.deepcopy(os.environ)
env['PATH'] = path
self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
stdin=remote.fileno(),
- stdout=remote2.fileno())
+ stdout=remote2.fileno(),
+ preexec_fn=self.__preexec_work)
remote.close()
remote2.close()
Parser.__init__(self, WrappedSocket(local))
+ def __preexec_work(self):
+ """Function run in the child process just before the socket
+ creator program is executed."""
+ # Put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (the boss will shut everything down by
+ # other means).
+ os.setpgrp()
+
def pid(self):
return self.__process.pid
@@ -223,4 +237,3 @@ class Creator(Parser):
logger.warn(BIND10_SOCKCREATOR_KILL)
if self.__process is not None:
self.__process.kill()
- self.__process = None
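The two changes above give the socket creator child its own copy of the environment (with PATH set for it) and move it into its own process group via preexec_fn, so a Ctrl-C in the controlling terminal is not delivered straight to b10-sockcreator. A minimal POSIX-only sketch of the same subprocess pattern; /bin/sleep stands in for b10-sockcreator and the PATH value is made up:

    import os
    import subprocess

    # Give the child its own PATH without touching the parent's environment
    # (the patch above uses copy.deepcopy(os.environ) for the same purpose).
    env = os.environ.copy()
    env['PATH'] = '/usr/local/libexec/bind10-devel:' + env.get('PATH', '')

    def into_own_process_group():
        # Runs in the child between fork() and exec(); detaching the child
        # from the terminal's process group keeps Ctrl-C (SIGINT) away from it.
        os.setpgrp()

    # '/bin/sleep 1' is only a placeholder for the real b10-sockcreator.
    child = subprocess.Popen(['/bin/sleep', '1'], env=env,
                             preexec_fn=into_own_process_group)
    child.wait()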
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
new file mode 100644
index 0000000..d6c1175
--- /dev/null
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Here's the cache for sockets from socket creator.
+"""
+
+import os
+import random
+import isc.bind10.sockcreator
+from copy import copy
+
+class SocketError(Exception):
+ """
+ Exception raised when the socket creator is unable to create the requested
+ socket. Possible reasons might be that the address it should be bound to
+ is already taken, the permissions are insufficient, the address family
+ is not supported on this computer and many more.
+
+ The errno, if not None, is passed from the socket creator.
+ """
+ def __init__(self, message, errno):
+ Exception.__init__(self, message)
+ self.errno = errno
+
+class ShareError(Exception):
+ """
+ The requested socket is already taken by another component and the sharing
+ parameters don't allow sharing with the new request.
+ """
+ pass
+
+class Socket:
+ """
+ This represents one socket cached by the Cache. This should never
+ be used directly by a user; it is used internally by the Cache. Therefore
+ many member variables are used directly instead of through accessor methods.
+
+ Be warned that this object implements the __del__ method. It closes the
+ socket held inside it. But this poses various problems with the garbage
+ collector. In short, do not make reference cycles with this and generally
+ leave this class alone to live peacefully.
+ """
+ def __init__(self, protocol, address, port, fileno):
+ """
+ Creates the socket.
+
+ The protocol, address and port are preserved for informational purposes.
+ """
+ self.protocol = protocol
+ self.address = address
+ self.port = port
+ self.fileno = fileno
+ # Mapping from token -> application
+ self.active_tokens = {}
+ # The tokens which were not yet picked up
+ self.waiting_tokens = set()
+ # Share modes and names by the tokens (token -> (mode, name))
+ self.shares = {}
+
+ def __del__(self):
+ """
+ Closes the file descriptor.
+ """
+ os.close(self.fileno)
+
+ def share_compatible(self, mode, name):
+ """
+ Checks if the given share mode and name is compatible with the ones
+ already installed here.
+
+ The allowed values for mode are listed in the Cache.get_token
+ function.
+ """
+ if mode not in ['NO', 'SAMEAPP', 'ANY']:
+ raise ValueError("Mode " + mode + " is invalid")
+
+ # Go through the existing ones
+ for (emode, ename) in self.shares.values():
+ if emode == 'NO' or mode == 'NO':
+ # One of them can't live together with anything
+ return False
+ if (emode == 'SAMEAPP' or mode == 'SAMEAPP') and \
+ ename != name:
+ # One of them can't live together with someone of different
+ # name
+ return False
+ # else both are ANY or SAMEAPP with the same name, which is OK
+ # No problem found, so we consider it OK
+ return True
+
+class Cache:
+ """
+ This is the cache for sockets from the socket creator. The purpose of the
+ cache is to hold the sockets that were requested until they are no longer
+ needed. One reason is that the socket is created before it is sent over the
+ unix domain socket in the boss, so we need to keep it somewhere for a while.
+
+ The other reason is that a single socket might be requested multiple times.
+ So we keep it here in case someone else might ask for it.
+
+ Each socket kept here has a reference count and when it drops to zero,
+ it is removed from cache and closed.
+
+ This is expected to be part of Boss, it is not a general utility class.
+
+ It is not expected to be subclassed. The methods and members are named
+ as protected so that tests can access them more easily.
+ """
+ def __init__(self, creator):
+ """
+ Initialization. The creator is the socket creator object
+ (isc.bind10.sockcreator.Creator) which will be used to create yet
+ uncached sockets.
+ """
+ self._creator = creator
+ # The sockets we have live here; these dicts are various ways
+ # to get at them. Each of them contains the Socket objects in some way.
+
+ # This one is dict of token: socket for the ones that were not yet
+ # picked up by an application.
+ self._waiting_tokens = {}
+ # This format is the same as above, but for the tokens that were
+ # already picked up by the application and not yet released.
+ self._active_tokens = {}
+ # This is a dict from applications to set of tokens used by the
+ # application, for the sockets already picked up by an application
+ self._active_apps = {}
+ # The sockets live here to be indexed by protocol, address and
+ # subsequently by port
+ self._sockets = {}
+ # These are just the tokens actually in use, so we don't generate
+ # dupes. If one is dropped, it can be potentially reclaimed.
+ self._live_tokens = set()
+
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ This requests a token representing a socket. The socket is either
+ found in the cache already or requested from the creator at this time
+ (and cached for later time).
+
+ The parameters are:
+ - protocol: either 'UDP' or 'TCP'
+ - address: the IPAddr object representing the address to bind to
+ - port: integer saying which port to bind to
+ - share_mode: either 'NO', 'SAMEAPP' or 'ANY', specifying how the
+ socket can be shared with others. See bin/bind10/creatorapi.txt
+ for details.
+ - share_name: the name of application, in case of 'SAMEAPP' share
+ mode. Only requests with the same name can share the socket.
+
+ If the call is successful, it returns a string token which can be
+ used to pick up the socket later. The socket is created with reference
+ count zero and if it isn't picked up soon enough (the timeout has yet to
+ be set), it will be removed and the token becomes invalid.
+
+ It can fail in various ways. Explicitly listed exceptions are:
+ - SocketError: this one is thrown if the socket creator couldn't provide
+ the socket and it is not yet cached (it belongs to another application,
+ for example).
+ - ShareError: the socket is already in the cache, but it can't be
+ shared due to share_mode and share_name combination (both the request
+ restrictions and of all copies of socket handed out are considered,
+ so it can be raised even if you call it with share_mode 'ANY').
+ - isc.bind10.sockcreator.CreatorError: fatal creator errors are
+ propagated. They should cause the boss to exit if ever encountered.
+
+ Note that it isn't guaranteed that the tokens will be unique, and they
+ should be used as an opaque handle only.
+ """
+ addr_str = str(address)
+ try:
+ socket = self._sockets[protocol][addr_str][port]
+ except KeyError:
+ # Something in the dicts is not there, so socket is to be
+ # created
+ try:
+ fileno = self._creator.get_socket(address, port, protocol)
+ except isc.bind10.sockcreator.CreatorError as ce:
+ if ce.fatal:
+ raise
+ else:
+ raise SocketError(str(ce), ce.errno)
+ socket = Socket(protocol, address, port, fileno)
+ # And cache it
+ if protocol not in self._sockets:
+ self._sockets[protocol] = {}
+ if addr_str not in self._sockets[protocol]:
+ self._sockets[protocol][addr_str] = {}
+ self._sockets[protocol][addr_str][port] = socket
+ # Now we get the token, check it is compatible
+ if not socket.share_compatible(share_mode, share_name):
+ raise ShareError("Cached socket not compatible with mode " +
+ share_mode + " and name " + share_name)
+ # Grab yet unused token
+ token = 't' + str(random.randint(0, 2 ** 32-1))
+ while token in self._live_tokens:
+ token = 't' + str(random.randint(0, 2 ** 32-1))
+ self._waiting_tokens[token] = socket
+ self._live_tokens.add(token)
+ socket.shares[token] = (share_mode, share_name)
+ socket.waiting_tokens.add(token)
+ return token
+
+ def get_socket(self, token, application):
+ """
+ This returns the socket created by get_token. The token should be the
+ one returned from previous call from get_token. The token can be used
+ only once to receive the socket.
+
+ The application is a token representing the application that requested
+ it. Currently, the boss uses the file descriptor of the connection from the
+ application, but anything which can be a key in a dict is OK from the
+ cache's point of view. You just need to use the same thing in
+ drop_application.
+
+ In case the token is considered invalid (it doesn't come from the
+ get_token, it was already used, the socket wasn't picked up soon
+ enough, ...), it raises ValueError.
+ """
+ try:
+ socket = self._waiting_tokens[token]
+ except KeyError:
+ raise ValueError("Token " + token +
+ " isn't waiting to be picked up")
+ del self._waiting_tokens[token]
+ self._active_tokens[token] = socket
+ if application not in self._active_apps:
+ self._active_apps[application] = set()
+ self._active_apps[application].add(token)
+ socket.waiting_tokens.remove(token)
+ socket.active_tokens[token] = application
+ return socket.fileno
+
+ def drop_socket(self, token):
+ """
+ This signals the application no longer uses the socket which was
+ requested by the given token. It decreases the reference count for
+ the socket and closes and removes the cached copy if it was the last
+ one.
+
+ It raises ValueError if the token doesn't exist.
+ """
+ try:
+ socket = self._active_tokens[token]
+ except KeyError:
+ raise ValueError("Token " + token + " doesn't represent an " +
+ "active socket")
+ # Now, remove everything from the bookkeeping
+ del socket.shares[token]
+ app = socket.active_tokens[token]
+ del socket.active_tokens[token]
+ del self._active_tokens[token]
+ self._active_apps[app].remove(token)
+ if len(self._active_apps[app]) == 0:
+ del self._active_apps[app]
+ self._live_tokens.remove(token)
+ # The socket is not used by anything now, so remove it
+ if len(socket.active_tokens) == 0 and len(socket.waiting_tokens) == 0:
+ addr = str(socket.address)
+ port = socket.port
+ proto = socket.protocol
+ del self._sockets[proto][addr][port]
+ # Clean up empty branches of the structure
+ if len(self._sockets[proto][addr]) == 0:
+ del self._sockets[proto][addr]
+ if len(self._sockets[proto]) == 0:
+ del self._sockets[proto]
+
+ def drop_application(self, application):
+ """
+ This signals that the application terminated and all sockets it picked up
+ should be considered unused by it now. It effectively calls drop_socket
+ on each of the sockets the application picked up and didn't drop yet.
+
+ If the application is invalid (no get_socket was successful with this
+ value of application), it raises ValueError.
+ """
+ try:
+ # Get a copy. Iterating over a set while deleting from it is
+ # asking for trouble, so we'll just have our own copy
+ # to iterate over.
+ to_drop = copy(self._active_apps[application])
+ except KeyError:
+ raise ValueError("Application " + str(application) +
+ " doesn't hold any sockets")
+ for token in to_drop:
+ self.drop_socket(token)
+ # We don't call del now. The last drop_socket should have
+ # removed the application key as well.
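The Cache above hands out string tokens for creator-provided sockets, checks the NO/SAMEAPP/ANY share modes when a cached socket is reused, and reference-counts the socket per token and per application. A minimal sketch of the intended call sequence; the FakeCreator below replaces isc.bind10.sockcreator.Creator (only get_socket is needed), and a plain address string and integer application keys stand in for the real IPAddr objects and connection file descriptors:

    import socket as stdsocket
    from isc.bind10.socket_cache import Cache

    class FakeCreator:
        """Hypothetical creator stub: opens an ordinary UDP socket instead of
        asking the privileged b10-sockcreator process."""
        def get_socket(self, address, port, protocol):
            sock = stdsocket.socket(stdsocket.AF_INET, stdsocket.SOCK_DGRAM)
            sock.bind((str(address), port))
            # Hand over the raw fd; the cache's Socket object will close it.
            return sock.detach()

    cache = Cache(FakeCreator())

    # Port 0 lets the OS pick a free port, which is enough for this sketch.
    token1 = cache.get_token('UDP', '127.0.0.1', 0, 'ANY', 'example-app')
    fd1 = cache.get_socket(token1, 1)   # application "1" picks it up

    # A second, compatible request shares the cached socket.
    token2 = cache.get_token('UDP', '127.0.0.1', 0, 'ANY', 'example-app')
    fd2 = cache.get_socket(token2, 2)   # application "2" picks it up
    assert fd1 == fd2                   # same underlying socket

    cache.drop_application(2)   # application 2 went away, drops token2
    cache.drop_socket(token1)   # last reference gone, socket is removed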
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
new file mode 100644
index 0000000..688ccf5
--- /dev/null
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from isc.bind10.component import Component, BaseComponent
+import isc.bind10.sockcreator
+from bind10_config import LIBEXECPATH
+import os
+import posix
+import isc.log
+from isc.log_messages.bind10_messages import *
+
+logger = isc.log.Logger("boss")
+
+class SockCreator(BaseComponent):
+ """
+ The socket creator component. Will start and stop the socket creator
+ accordingly.
+
+ Note: _creator shouldn't be reset explicitly once created. The
+ underlying Popen object would then wait() the child process internally,
+ which breaks the assumption of the boss, who is expecting to see
+ the process die in waitpid().
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+ self.__creator = None
+ self.__uid = boss.uid
+ self.__gid = boss.gid
+
+ def _start_internal(self):
+ self._boss.curproc = 'b10-sockcreator'
+ self.__creator = isc.bind10.sockcreator.Creator(LIBEXECPATH + ':' +
+ os.environ['PATH'])
+ self._boss.register_process(self.pid(), self)
+ self._boss.set_creator(self.__creator)
+ self._boss.log_started(self.pid())
+ if self.__gid is not None:
+ logger.info(BIND10_SETGID, self.__gid)
+ posix.setgid(self.__gid)
+ if self.__uid is not None:
+ logger.info(BIND10_SETUID, self.__uid)
+ posix.setuid(self.__uid)
+
+ def _stop_internal(self):
+ self.__creator.terminate()
+
+ def name(self):
+ return "Socket creator"
+
+ def pid(self):
+ """
+ Pid of the socket creator. It is provided differently from a usual
+ component.
+ """
+ return self.__creator.pid() if self.__creator else None
+
+ def kill(self, forceful=False):
+ # We don't really care about forceful here
+ if self.__creator:
+ self.__creator.kill()
+
+class Msgq(Component):
+ """
+ The message queue. Starting is delegated to the boss; stopping is not
+ supported and we let the boss kill it by signal.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, None, None,
+ boss.start_msgq)
+
+ def _stop_internal(self):
+ """
+ We can't really stop the message queue, as many processes may need
+ it for their shutdown and it doesn't have a shutdown command anyway.
+ But as it is stateless, it's OK to kill it.
+
+ So we disable this method (as the only time it could be called is
+ during shutdown) and wait for the boss to kill it in the next shutdown
+ step.
+
+ This actually breaks the recommendation at Component that we shouldn't
+ override its methods one by one. This is a special case, because
+ we don't provide a different implementation; we completely disable
+ the method by providing an empty one. This can't hurt the internals.
+ """
+ pass
+
+class CfgMgr(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'ConfigManager',
+ None, boss.start_cfgmgr)
+
+class Auth(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Auth', None,
+ boss.start_auth)
+
+class Resolver(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Resolver', None,
+ boss.start_resolver)
+
+class CmdCtl(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Cmdctl', None,
+ boss.start_cmdctl)
+
+def get_specials():
+ """
+ List of specially started components. Each one should be the class that can
+ be used to create that component.
+ """
+ return {
+ 'sockcreator': SockCreator,
+ 'msgq': Msgq,
+ 'cfgmgr': CfgMgr,
+ # TODO: Should these be replaced by configuration in config manager only?
+ # They should not have any parameters anyway
+ 'auth': Auth,
+ 'resolver': Resolver,
+ 'cmdctl': CmdCtl
+ }
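get_specials() above maps the names usable in a component's 'special' configuration item to the classes that create those components. A short sketch of how an extra special could be plugged in, following the same (process, boss, kind, address, params) constructor signature; the Dhcp6 class, its 'Dhcp6' address and the boss.start_dhcp6 hook are purely illustrative assumptions, not part of this commit:

    from isc.bind10.component import Component
    from isc.bind10.special_component import get_specials

    class Dhcp6(Component):
        """Hypothetical special component started via a dedicated boss hook."""
        def __init__(self, process, boss, kind, address=None, params=None):
            # 'Dhcp6' as message-bus address and boss.start_dhcp6 as the
            # start hook are assumptions made for this sketch only.
            Component.__init__(self, process, boss, kind, 'Dhcp6', None,
                               boss.start_dhcp6)

    def get_extended_specials():
        """The dict handed to Configurator(boss, specials=...)."""
        specials = get_specials()
        specials['dhcp6'] = Dhcp6
        return specials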
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index df8ab30..196a8b9 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = sockcreator_test.py
+PYTESTS = sockcreator_test.py component_test.py socket_cache_test.py
EXTRA_DIST = $(PYTESTS)
@@ -23,6 +23,7 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
new file mode 100644
index 0000000..af529f8
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -0,0 +1,1071 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Tests for the isc.bind10.component module and the
+isc.bind10.special_component module.
+"""
+
+import unittest
+import isc.log
+import time
+import copy
+from isc.bind10.component import Component, Configurator, BaseComponent
+import isc.bind10.special_component
+
+class TestError(Exception):
+ """
+ Just a private exception not known to anybody we use for our tests.
+ """
+ pass
+
+class BossUtils:
+ """
+ A class that provides some utilities for pretending we're the Boss.
+ It is expected to be inherited by the test cases themselves.
+ """
+ def setUp(self):
+ """
+ Part of setup. Should be called by descendant's setUp.
+ """
+ self._shutdown = False
+ self._exitcode = None
+ # Back up the time function, we may want to replace it with something
+ self.__orig_time = isc.bind10.component.time.time
+
+ def tearDown(self):
+ """
+ Clean up after tests. If the descendant implements a tearDown, it
+ should call this method internally.
+ """
+ # Return the original time function
+ isc.bind10.component.time.time = self.__orig_time
+
+ def component_shutdown(self, exitcode=0):
+ """
+ Mock function to shut down. We just note we were asked to do so.
+ """
+ self._shutdown = True
+ self._exitcode = exitcode
+
+ def _timeskip(self):
+ """
+ Skip about 30 seconds into the future. Implemented by replacing the
+ time.time function in the tested module with a function that returns
+ the current time increased by 30.
+ """
+ tm = time.time()
+ isc.bind10.component.time.time = lambda: tm + 30
+
+ # A few functions that pretend to start something. Part of pretending
+ # to be the boss.
+ def start_msgq(self):
+ pass
+
+ def start_cfgmgr(self):
+ pass
+
+ def start_auth(self):
+ pass
+
+ def start_resolver(self):
+ pass
+
+ def start_cmdctl(self):
+ pass
+
+class ComponentTests(BossUtils, unittest.TestCase):
+ """
+ Tests for the bind10.component.Component class
+ """
+ def setUp(self):
+ """
+ Pretend a newly started system.
+ """
+ BossUtils.setUp(self)
+ self._shutdown = False
+ self._exitcode = None
+ self.__start_called = False
+ self.__stop_called = False
+ self.__failed_called = False
+ self.__registered_processes = {}
+ self.__stop_process_params = None
+ self.__start_simple_params = None
+ # Pretending to be boss
+ self.gid = None
+ self.__gid_set = None
+ self.uid = None
+ self.__uid_set = None
+
+ def __start(self):
+ """
+ Mock function, installed into the component as _start_internal.
+ This only notes the component was "started".
+ """
+ self.__start_called = True
+
+ def __stop(self):
+ """
+ Mock function, installed into the component as _stop_internal.
+ This only notes the component was "stopped".
+ """
+ self.__stop_called = True
+
+ def __fail(self):
+ """
+ Mock function, installed into the component as _failed_internal.
+ This only notes the component called the method.
+ """
+ self.__failed_called = True
+
+ def __fail_to_start(self):
+ """
+ Mock function. It can be installed into the component's _start_internal
+ to simulate a component that fails to start by raising an exception.
+ """
+ orig_started = self.__start_called
+ self.__start_called = True
+ if not orig_started:
+ # This one is from restart. Avoid infinite recursion for now.
+ # FIXME: We should use the restart scheduler to avoid it, not this.
+ raise TestError("Test error")
+
+ def __create_component(self, kind):
+ """
+ Convenience function that creates a component of given kind
+ and installs the mock functions into it so we can hook into
+ its behaviour.
+
+ The process name used is nonsense, as it isn't needed in this
+ kind of test and we pretend to be the boss.
+ """
+ component = Component('No process', self, kind, 'homeless', [])
+ component._start_internal = self.__start
+ component._stop_internal = self.__stop
+ component._failed_internal = self.__fail
+ return component
+
+ def test_name(self):
+ """
+ Test that name() returns whatever we passed to the constructor as the process.
+ """
+ component = self.__create_component('core')
+ self.assertEqual('No process', component.name())
+
+ def test_guts(self):
+ """
+ Test the correct data are stored inside the component.
+ """
+ component = self.__create_component('core')
+ self.assertEqual(self, component._boss)
+ self.assertEqual("No process", component._process)
+ self.assertEqual(None, component._start_func)
+ self.assertEqual("homeless", component._address)
+ self.assertEqual([], component._params)
+
+ def __check_startup(self, component):
+ """
+ Check that nothing was called yet. A newly created component should
+ not get started right away, so this should pass after the creation.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertFalse(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertFalse(component.running())
+ # We can't stop or fail the component yet
+ self.assertRaises(ValueError, component.stop)
+ self.assertRaises(ValueError, component.failed, 1)
+
+ def __check_started(self, component):
+ """
+ Check the component was started, but not stopped anyhow yet.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertTrue(component.running())
+
+ def __check_dead(self, component):
+ """
+ Check the component is completely dead, and the server too.
+ """
+ self.assertTrue(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertEqual(1, self._exitcode)
+ self.assertFalse(component.running())
+ # Surely it can't be stopped when already dead
+ self.assertRaises(ValueError, component.stop)
+ # Nor started
+ self.assertRaises(ValueError, component.start)
+ # Nor it can fail again
+ self.assertRaises(ValueError, component.failed, 1)
+
+ def __check_restarted(self, component):
+ """
+ Check the component restarted successfully.
+
+ Reset self.__start_called to False before making the component fail when
+ this check is going to be used, so it verifies the restart started it again.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertTrue(component.running())
+ # Check it can't be started again
+ self.assertRaises(ValueError, component.start)
+
+ def __check_not_restarted(self, component):
+ """
+ Check the component has not (yet) restarted successfully.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertFalse(component.running())
+
+ def __do_start_stop(self, kind):
+ """
+ This is a body of a test. It creates a component of given kind,
+ then starts it and stops it. It checks correct functions are called
+ and the component's status is correct.
+
+ It also checks the component can't be started/stopped twice.
+ """
+ # Create it and check it did not do any funny stuff yet
+ component = self.__create_component(kind)
+ self.__check_startup(component)
+ # Start it and check it called the correct starting functions
+ component.start()
+ self.__check_started(component)
+ # Check it can't be started twice
+ self.assertRaises(ValueError, component.start)
+ # Stop it again and check
+ component.stop()
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertTrue(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertFalse(component.running())
+ # Check it can't be stopped twice
+ self.assertRaises(ValueError, component.stop)
+ # Or failed
+ self.assertRaises(ValueError, component.failed, 1)
+ # But it can be started again if it is stopped
+ # (no more checking here, just it doesn't crash)
+ component.start()
+
+ def test_start_stop_core(self):
+ """
+ A start-stop test for core component. See do_start_stop.
+ """
+ self.__do_start_stop('core')
+
+ def test_start_stop_needed(self):
+ """
+ A start-stop test for needed component. See do_start_stop.
+ """
+ self.__do_start_stop('needed')
+
+ def test_start_stop_dispensable(self):
+ """
+ A start-stop test for dispensable component. See do_start_stop.
+ """
+ self.__do_start_stop('dispensable')
+
+ def test_start_fail_core(self):
+ """
+ Start and then fail a core component. It should stop the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Pretend the component died
+ restarted = component.failed(1)
+ # Since it is a core component, it should not be restarted
+ self.assertFalse(restarted)
+ # It should bring down the whole server
+ self.__check_dead(component)
+
+ def test_start_fail_core_later(self):
+ """
+ Start and then fail a core component, but let it run for a longer time.
+ It should still stop the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ self._timeskip()
+ # Pretend the component died some time later
+ restarted = component.failed(1)
+ # Should not be restarted
+ self.assertFalse(restarted)
+ # Check the component is still dead
+ self.__check_dead(component)
+
+ def test_start_fail_needed(self):
+ """
+ Start and then fail a needed component. As this happens really soon after
+ it was started, it is considered a failure to start and should bring down
+ the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail right away.
+ restarted = component.failed(1)
+ # Should not have restarted
+ self.assertFalse(restarted)
+ self.__check_dead(component)
+
+ def test_start_fail_needed_later(self):
+ """
+ Start and then fail a needed component. But the failure happens later on,
+ so we just restart it and are happy.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail later on
+ self.__start_called = False
+ self._timeskip()
+ restarted = component.failed(1)
+ # Should have restarted
+ self.assertTrue(restarted)
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable(self):
+ """
+ Start and then fail a dispensable component. Should not get restarted.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail right away
+ restarted = component.failed(1)
+ # Should signal that it did not restart
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+
+ def test_start_fail_dispensable_later(self):
+ """
+ Start and then later on fail a dispensable component. Should just get
+ restarted.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail later on
+ self._timeskip()
+ restarted = component.failed(1)
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable_restart_later(self):
+ """
+ Start and then fail a dispensable component, wait a bit and try to
+ restart. Should get restarted after the wait.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail immediately
+ restarted = component.failed(1)
+ # should signal that it did not restart
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+ self._timeskip()
+ # try to restart again
+ restarted = component.restart()
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ def test_fail_core(self):
+ """
+ Failure to start a core component. Should bring the system down
+ and the exception should get through.
+ """
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_dead(component)
+
+ def test_fail_needed(self):
+ """
+ Failure to start a needed component. Should bring the system down
+ and the exception should get through.
+ """
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_dead(component)
+
+ def test_fail_dispensable(self):
+ """
+ Failure to start a dispensable component. The exception should get
+ through, but it should be restarted after a time skip.
+ """
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ # tell it to see if it must restart
+ restarted = component.restart()
+ # should not have restarted yet
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+ self._timeskip()
+ # tell it to see if it must restart and do so, with our vision of time
+ restarted = component.restart()
+ # should have restarted now
+ self.assertTrue(restarted)
+ self.__check_restarted(component)
+
+ def test_component_start_time(self):
+ """
+ Check that original start time is set initially, and remains the same
+ after a restart, while the internal __start_time does change
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ self.assertIsNone(component._original_start_time)
+ component.start()
+ self.__check_started(component)
+
+ self.assertIsNotNone(component._original_start_time)
+ self.assertIsNotNone(component._BaseComponent__start_time)
+ original_start_time = component._original_start_time
+ start_time = component._BaseComponent__start_time
+ # Not restarted yet, so they should be the same
+ self.assertEqual(original_start_time, start_time)
+
+ self._timeskip()
+ # Make it fail
+ restarted = component.failed(1)
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ # original start time should not have changed
+ self.assertEqual(original_start_time, component._original_start_time)
+ # but actual start time should
+ self.assertNotEqual(start_time, component._BaseComponent__start_time)
+
+ def test_bad_kind(self):
+ """
+ Test the component rejects nonsensical kinds. This includes bad
+ capitalization.
+ """
+ for kind in ['Core', 'CORE', 'nonsense', 'need ed', 'required']:
+ self.assertRaises(ValueError, Component, 'No process', self, kind)
+
+ def test_pid_not_running(self):
+ """
+ Test that a component that is not yet started doesn't have a PID.
+ Asking for one won't fail, though; it just returns None.
+ """
+ for component_type in [Component,
+ isc.bind10.special_component.SockCreator,
+ isc.bind10.special_component.Msgq,
+ isc.bind10.special_component.CfgMgr,
+ isc.bind10.special_component.Auth,
+ isc.bind10.special_component.Resolver,
+ isc.bind10.special_component.CmdCtl]:
+ component = component_type('none', self, 'needed')
+ self.assertIsNone(component.pid())
+
+ def test_kill_unstarted(self):
+ """
+ Try to kill the component if it's not started. Should not fail.
+
+ We do not try to kill a running component, as we should not start
+ it during unit tests.
+ """
+ component = Component('component', self, 'needed')
+ component.kill()
+ component.kill(True)
+
+ def register_process(self, pid, process):
+ """
+ Part of pretending to be a boss
+ """
+ self.__registered_processes[pid] = process
+
+ def test_component_attributes(self):
+ """
+ Test the default attributes of Component (not BaseComponent) and
+ some of the methods we might be allowed to call.
+ """
+ class TestProcInfo:
+ def __init__(self):
+ self.pid = 42
+ component = Component('component', self, 'needed', 'Address',
+ ['hello'], TestProcInfo)
+ self.assertEqual('component', component._process)
+ self.assertEqual('component', component.name())
+ self.assertIsNone(component._procinfo)
+ self.assertIsNone(component.pid())
+ self.assertEqual(['hello'], component._params)
+ self.assertEqual('Address', component._address)
+ self.assertFalse(component.running())
+ self.assertEqual({}, self.__registered_processes)
+ component.start()
+ self.assertTrue(component.running())
+ # Some versions of unittest miss assertIsInstance
+ self.assertTrue(isinstance(component._procinfo, TestProcInfo))
+ self.assertEqual(42, component.pid())
+ self.assertEqual(component, self.__registered_processes.get(42))
+
+ def stop_process(self, process, address, pid):
+ """
+ Part of pretending to be boss.
+ """
+ self.__stop_process_params = (process, address, pid)
+
+ def start_simple(self, process):
+ """
+ Part of pretending to be boss.
+ """
+ self.__start_simple_params = process
+
+ def test_component_start_stop_internal(self):
+ """
+ Test the behavior of _stop_internal and _start_internal.
+ """
+ component = Component('component', self, 'needed', 'Address')
+ component.start()
+ self.assertTrue(component.running())
+ self.assertEqual('component', self.__start_simple_params)
+ component.pid = lambda: 42
+ component.stop()
+ self.assertFalse(component.running())
+ self.assertEqual(('component', 'Address', 42),
+ self.__stop_process_params)
+
+ def test_component_kill(self):
+ """
+ Check that kill() is propagated to the process. The case when the
+ component wasn't started yet is already tested elsewhere.
+ """
+ class Process:
+ def __init__(self):
+ self.killed = False
+ self.terminated = False
+ def kill(self):
+ self.killed = True
+ def terminate(self):
+ self.terminated = True
+ process = Process()
+ class ProcInfo:
+ def __init__(self):
+ self.process = process
+ self.pid = 42
+ component = Component('component', self, 'needed', 'Address',
+ [], ProcInfo)
+ component.start()
+ self.assertTrue(component.running())
+ component.kill()
+ self.assertTrue(process.terminated)
+ self.assertFalse(process.killed)
+ process.terminated = False
+ component.kill(True)
+ self.assertTrue(process.killed)
+ self.assertFalse(process.terminated)
+
+ def setgid(self, gid):
+ self.__gid_set = gid
+
+ def setuid(self, uid):
+ self.__uid_set = uid
+
+ class FakeCreator:
+ def pid(self):
+ return 42
+ def terminate(self): pass
+ def kill(self): pass
+
+ def set_creator(self, creator):
+ """
+ Part of faking being the boss. Check the creator (faked as well)
+ is passed here.
+ """
+ self.assertTrue(isinstance(creator, self.FakeCreator))
+
+ def log_started(self, pid):
+ """
+ Part of faking the boss. Check the pid is that of the fake creator.
+ """
+ self.assertEqual(42, pid)
+
+ def test_creator(self):
+ """
+ Some tests around the SockCreator component.
+ """
+ component = isc.bind10.special_component.SockCreator(None, self,
+ 'needed', None)
+ orig_setgid = isc.bind10.special_component.posix.setgid
+ orig_setuid = isc.bind10.special_component.posix.setuid
+ isc.bind10.special_component.posix.setgid = self.setgid
+ isc.bind10.special_component.posix.setuid = self.setuid
+ orig_creator = \
+ isc.bind10.special_component.isc.bind10.sockcreator.Creator
+ # Just ignore the creator call
+ isc.bind10.special_component.isc.bind10.sockcreator.Creator = \
+ lambda path: self.FakeCreator()
+ component.start()
+ # No gid/uid set in boss, nothing called.
+ self.assertIsNone(self.__gid_set)
+ self.assertIsNone(self.__uid_set)
+ # Doesn't do anything, but doesn't crash
+ component.stop()
+ component.kill()
+ component.kill(True)
+ self.gid = 4200
+ self.uid = 42
+ component = isc.bind10.special_component.SockCreator(None, self,
+ 'needed', None)
+ component.start()
+ # This time, it gets called
+ self.assertEqual(4200, self.__gid_set)
+ self.assertEqual(42, self.__uid_set)
+ isc.bind10.special_component.posix.setgid = orig_setgid
+ isc.bind10.special_component.posix.setuid = orig_setuid
+ isc.bind10.special_component.isc.bind10.sockcreator.Creator = \
+ orig_creator
+
+class TestComponent(BaseComponent):
+ """
+ A test component. It does not start any processes; it just logs
+ information about what happens.
+ """
+ def __init__(self, owner, name, kind, address=None, params=None):
+ """
+ Initializes the component. The owner is the test that started the
+ component. The logging will happen into it.
+
+ The process is used as a name for the logging.
+ """
+ BaseComponent.__init__(self, owner, kind)
+ self.__owner = owner
+ self.__name = name
+ self.log('init')
+ self.log(kind)
+ self._address = address
+ self._params = params
+
+ def log(self, event):
+ """
+ Log an event into the owner. The owner can then check the correct
+ order of events that happened.
+ """
+ self.__owner.log.append((self.__name, event))
+
+ def _start_internal(self):
+ self.log('start')
+
+ def _stop_internal(self):
+ self.log('stop')
+
+ def _failed_internal(self):
+ self.log('failed')
+
+ def kill(self, forceful=False):
+ self.log('killed')
+
+class FailComponent(BaseComponent):
+ """
+ A mock component that fails whenever it is started.
+ """
+ def __init__(self, name, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+
+ def _start_internal(self):
+ raise TestError("test error")
+
+class ConfiguratorTest(BossUtils, unittest.TestCase):
+ """
+ Tests for the configurator.
+ """
+ def setUp(self):
+ """
+ Prepare some test data for the tests.
+ """
+ BossUtils.setUp(self)
+ self.log = []
+ # The core "hardcoded" configuration
+ self.__core = {
+ 'core1': {
+ 'priority': 5,
+ 'process': 'core1',
+ 'special': 'test',
+ 'kind': 'core'
+ },
+ 'core2': {
+ 'process': 'core2',
+ 'special': 'test',
+ 'kind': 'core'
+ },
+ 'core3': {
+ 'process': 'core3',
+ 'priority': 3,
+ 'special': 'test',
+ 'kind': 'core'
+ }
+ }
+ # How they should be started. They are created in the order they are
+ # found in the dict, but then they should be started by priority.
+ # This expects that the same dict returns its keys in the same order
+ # every time
+ self.__core_log_create = []
+ for core in self.__core.keys():
+ self.__core_log_create.append((core, 'init'))
+ self.__core_log_create.append((core, 'core'))
+ self.__core_log_start = [('core1', 'start'), ('core3', 'start'),
+ ('core2', 'start')]
+ self.__core_log = self.__core_log_create + self.__core_log_start
+ self.__specials = { 'test': self.__component_test }
+
+ def __component_test(self, process, boss, kind, address=None, params=None):
+ """
+ Create a test component. It will log events to us.
+ """
+ self.assertEqual(self, boss)
+ return TestComponent(self, process, kind, address, params)
+
+ def test_init(self):
+ """
+ Tests the configurator can be created and it does not create
+ any components yet, nor does it remember anything.
+ """
+ configurator = Configurator(self, self.__specials)
+ self.assertEqual([], self.log)
+ self.assertEqual({}, configurator._components)
+ self.assertFalse(configurator.running())
+
+ def test_run_plan(self):
+ """
+ Test the internal function for running plans. Just check that it can
+ handle the commands in the given order; we verify that through the log.
+
+ Also includes one that raises, so we see it just stops there.
+ """
+ # Prepare the configurator and the plan
+ configurator = Configurator(self, self.__specials)
+ started = self.__component_test('second', self, 'dispensable')
+ started.start()
+ stopped = self.__component_test('first', self, 'core')
+ configurator._components = {'second': started}
+ plan = [
+ {
+ 'component': stopped,
+ 'command': 'start',
+ 'name': 'first',
+ 'config': {'a': 1}
+ },
+ {
+ 'component': started,
+ 'command': 'stop',
+ 'name': 'second',
+ 'config': {}
+ },
+ {
+ 'component': FailComponent('third', self, 'needed'),
+ 'command': 'start',
+ 'name': 'third',
+ 'config': {}
+ },
+ {
+ 'component': self.__component_test('fourth', self, 'core'),
+ 'command': 'start',
+ 'name': 'fourth',
+ 'config': {}
+ }
+ ]
+ # Don't include the preparation into the log
+ self.log = []
+ # The error from the third component is propagated
+ self.assertRaises(TestError, configurator._run_plan, plan)
+ # The first two were handled, the rest not, due to the exception
+ self.assertEqual([('first', 'start'), ('second', 'stop')], self.log)
+ self.assertEqual({'first': ({'a': 1}, stopped)},
+ configurator._components)
+
+ def __build_components(self, config):
+ """
+ Insert the components into the configuration to specify possible
+ Configurator._components.
+
+ Actually, the components are None, but we need something to be there.
+ """
+ result = {}
+ for name in config.keys():
+ result[name] = (config[name], None)
+ return result
+
+ def test_build_plan(self):
+ """
+ Test building the plan correctly. Not complete yet; this grows as we
+ add more ways of changing the plan.
+ """
+ configurator = Configurator(self, self.__specials)
+ plan = configurator._build_plan({}, self.__core)
+ # This should have created the components
+ self.assertEqual(self.__core_log_create, self.log)
+ self.assertEqual(3, len(plan))
+ for (task, name) in zip(plan, ['core1', 'core3', 'core2']):
+ self.assertTrue('component' in task)
+ self.assertEqual('start', task['command'])
+ self.assertEqual(name, task['name'])
+ component = task['component']
+ self.assertIsNone(component._address)
+ self.assertIsNone(component._params)
+
+ # A plan to go from older state to newer one containing more components
+ bigger = copy.copy(self.__core)
+ bigger['additional'] = {
+ 'priority': 6,
+ 'special': 'test',
+ 'process': 'additional',
+ 'kind': 'needed'
+ }
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(self.__core),
+ bigger)
+ self.assertEqual([('additional', 'init'), ('additional', 'needed')],
+ self.log)
+ self.assertEqual(1, len(plan))
+ self.assertTrue('component' in plan[0])
+ component = plan[0]['component']
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('additional', plan[0]['name'])
+
+ # Now remove the one component again
+ # We run the plan so the component is wired into internal structures
+ configurator._run_plan(plan)
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(bigger),
+ self.__core)
+ self.assertEqual([], self.log)
+ self.assertEqual([{
+ 'command': 'stop',
+ 'name': 'additional',
+ 'component': component
+ }], plan)
+
+ # We want to switch a component. So, prepare the configurator so it
+ # holds one
+ configurator._run_plan(configurator._build_plan(
+ self.__build_components(self.__core), bigger))
+ # Get a different configuration with a different component
+ different = copy.copy(self.__core)
+ different['another'] = {
+ 'special': 'test',
+ 'process': 'another',
+ 'kind': 'dispensable'
+ }
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(bigger),
+ different)
+ self.assertEqual([('another', 'init'), ('another', 'dispensable')],
+ self.log)
+ self.assertEqual(2, len(plan))
+ self.assertEqual('stop', plan[0]['command'])
+ self.assertEqual('additional', plan[0]['name'])
+ self.assertTrue('component' in plan[0])
+ self.assertEqual('start', plan[1]['command'])
+ self.assertEqual('another', plan[1]['name'])
+ self.assertTrue('component' in plan[1])
+
+ # Some slightly insane plans, like missing process, having parameters,
+ # no special, etc
+ plan = configurator._build_plan({}, {
+ 'component': {
+ 'kind': 'needed',
+ 'params': ["1", "2"],
+ 'address': 'address'
+ }
+ })
+ self.assertEqual(1, len(plan))
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('component', plan[0]['name'])
+ component = plan[0]['component']
+ self.assertEqual('component', component.name())
+ self.assertEqual(["1", "2"], component._params)
+ self.assertEqual('address', component._address)
+ self.assertEqual('needed', component._kind)
+ # We don't use isinstance on purpose, it would allow a descendant
+ self.assertTrue(type(component) is Component)
+ plan = configurator._build_plan({}, {
+ 'component': { 'kind': 'dispensable' }
+ })
+ self.assertEqual(1, len(plan))
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('component', plan[0]['name'])
+ component = plan[0]['component']
+ self.assertEqual('component', component.name())
+ self.assertIsNone(component._params)
+ self.assertIsNone(component._address)
+ self.assertEqual('dispensable', component._kind)
+
+ def __do_switch(self, option, value):
+ """
+ Start it with some component and then switch the configuration of the
+ component. This will probably raise, as it is not yet supported.
+ """
+ configurator = Configurator(self, self.__specials)
+ compconfig = {
+ 'special': 'test',
+ 'process': 'process',
+ 'priority': 13,
+ 'kind': 'core'
+ }
+ modifiedconfig = copy.copy(compconfig)
+ modifiedconfig[option] = value
+ return configurator._build_plan({'comp': (compconfig, None)},
+ {'comp': modifiedconfig})
+
+ def test_change_config_plan(self):
+ """
+ Test changing the configuration of one component. This is not yet
+ implemented and should therefore raise NotImplementedError.
+ """
+ self.assertRaises(NotImplementedError, self.__do_switch, 'kind',
+ 'dispensable')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'special',
+ 'not_a_test')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'process',
+ 'different')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'address',
+ 'different')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'params',
+ ['different'])
+ # This does not change anything on running component, so no need to
+ # raise
+ self.assertEqual([], self.__do_switch('priority', 5))
+ # Check against a false positive when the data are the same but a
+ # different instance
+ self.assertEqual([], self.__do_switch('special', 'test'))
+
+ def __check_shutdown_log(self):
+ """
+ Checks the log for shutting down from the core configuration.
+ """
+ # We know everything must be stopped and we know what it is,
+ # but we don't know the order, so we check each component appears
+ # exactly once in the log
+ components = set(self.__core.keys())
+ for (name, command) in self.log:
+ self.assertEqual('stop', command)
+ self.assertTrue(name in components)
+ components.remove(name)
+ self.assertEqual(set([]), components, "Some component wasn't stopped")
+
+ def test_run(self):
+ """
+ Passes some configuration to the startup method and sees if
+ the components are started up. Then it reconfigures with an empty
+ configuration, then with the original configuration again, and
+ finally shuts down.
+
+ It also checks the components are kept inside the configurator.
+ """
+ configurator = Configurator(self, self.__specials)
+ # Can't reconfigure nor stop yet
+ self.assertRaises(ValueError, configurator.reconfigure, self.__core)
+ self.assertRaises(ValueError, configurator.shutdown)
+ self.assertFalse(configurator.running())
+ # Start it
+ configurator.startup(self.__core)
+ self.assertEqual(self.__core_log, self.log)
+ for core in self.__core.keys():
+ self.assertTrue(core in configurator._components)
+ self.assertEqual(self.__core[core],
+ configurator._components[core][0])
+ self.assertEqual(set(self.__core), set(configurator._components))
+ self.assertTrue(configurator.running())
+ # It can't be started twice
+ self.assertRaises(ValueError, configurator.startup, self.__core)
+
+ self.log = []
+ # Reconfigure - stop everything
+ configurator.reconfigure({})
+ self.assertEqual({}, configurator._components)
+ self.assertTrue(configurator.running())
+ self.__check_shutdown_log()
+
+ # Start it again
+ self.log = []
+ configurator.reconfigure(self.__core)
+ self.assertEqual(self.__core_log, self.log)
+ for core in self.__core.keys():
+ self.assertTrue(core in configurator._components)
+ self.assertEqual(self.__core[core],
+ configurator._components[core][0])
+ self.assertEqual(set(self.__core), set(configurator._components))
+ self.assertTrue(configurator.running())
+
+ # Do a shutdown
+ self.log = []
+ configurator.shutdown()
+ self.assertEqual({}, configurator._components)
+ self.assertFalse(configurator.running())
+ self.__check_shutdown_log()
+
+ # It can't be stopped twice
+ self.assertRaises(ValueError, configurator.shutdown)
+
+ def test_sort_no_prio(self):
+ """
+ There was a bug: if there were two components with the same priority
+ (or without a priority), it failed as it couldn't compare the dicts.
+ This tests that it doesn't crash.
+ """
+ configurator = Configurator(self, self.__specials)
+ configurator._build_plan({}, {
+ "c1": { 'kind': 'dispensable'},
+ "c2": { 'kind': 'dispensable'}
+ })
+
+if __name__ == '__main__':
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
index 4453184..f67781c 100644
--- a/src/lib/python/isc/bind10/tests/sockcreator_test.py
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -13,9 +13,6 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# This test file is generated .py.in -> .py just to be in the build dir,
-# same as the rest of the tests. Saves a lot of stuff in makefile.
-
"""
Tests for the bind10.sockcreator module.
"""
@@ -306,6 +303,7 @@ class WrapTests(unittest.TestCase):
# Transfer the descriptor
send_fd(t1.fileno(), p1.fileno())
+ p1.close()
p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
# Now, pass some data through the socket
@@ -321,6 +319,14 @@ class WrapTests(unittest.TestCase):
data = t1.recv(1)
self.assertEqual(b'C', data)
+ # Explicitly close the temporary socket pairs, as the Python
+ # interpreter expects it. It may not be 100% exception safe,
+ # but since this is only for tests we prefer brevity.
+ p1.close()
+ p2.close()
+ t1.close()
+ t2.close()
+
if __name__ == '__main__':
isc.log.init("bind10") # FIXME Should this be needed?
isc.log.resetUnitTestRootLogger()
diff --git a/src/lib/python/isc/bind10/tests/socket_cache_test.py b/src/lib/python/isc/bind10/tests/socket_cache_test.py
new file mode 100644
index 0000000..bbbf776
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/socket_cache_test.py
@@ -0,0 +1,396 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import isc.log
+import isc.bind10.socket_cache
+import isc.bind10.sockcreator
+from isc.net.addr import IPAddr
+import os
+
+class Test(unittest.TestCase):
+ """
+ Base for the tests here. It replaces the os.close method.
+ """
+ def setUp(self):
+ self._closes = []
+ isc.bind10.socket_cache.os.close = self.__close
+
+ def tearDown(self):
+ # This is not a very clean solution. But once the test ceases
+ # to exist, the method must not be used to destroy the
+ # object any more, and we can't restore the real os.close here
+ # as we never work with real sockets in these tests.
+ isc.bind10.socket_cache.os.close = lambda fd: None
+
+ def __close(self, fd):
+ """
+ Just log that a close was called.
+ """
+ self._closes.append(fd)
+
+class SocketTest(Test):
+ """
+ Test for the Socket class.
+ """
+ def setUp(self):
+ """
+ Creates the socket to be tested.
+
+ It also creates other useful test variables.
+ """
+ Test.setUp(self)
+ self.__address = IPAddr("192.0.2.1")
+ self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+ 1024, 42)
+
+ def test_init(self):
+ """
+ Checks the internals of the socket just after its creation.
+ """
+ self.assertEqual('UDP', self.__socket.protocol)
+ self.assertEqual(self.__address, self.__socket.address)
+ self.assertEqual(1024, self.__socket.port)
+ self.assertEqual(42, self.__socket.fileno)
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual({}, self.__socket.shares)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+
+ def test_del(self):
+ """
+ Check it closes the socket when removed.
+ """
+ # This should make the refcount 0 and call the destructor
+ # right away
+ self.__socket = None
+ self.assertEqual([42], self._closes)
+
+ def test_share_modes(self):
+ """
+ Test the share mode compatibility check function.
+ """
+ modes = ['NO', 'SAMEAPP', 'ANY']
+ # If there are no shares, it is compatible with everything.
+ for mode in modes:
+ self.assertTrue(self.__socket.share_compatible(mode, 'anything'))
+
+ # There's a NO already, so it is incompatible with everything.
+ self.__socket.shares = {'token': ('NO', 'anything')}
+ for mode in modes:
+ self.assertFalse(self.__socket.share_compatible(mode, 'anything'))
+
+ # If there's a SAMEAPP, it is compatible with ANY and SAMEAPP requests
+ # from the same application only.
+ self.__socket.shares = {'token': ('SAMEAPP', 'app')}
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+ self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+
+ # If there's an ANY, then any ANY request is compatible, and SAMEAPP
+ # only from the same application
+ self.__socket.shares = {'token': ('ANY', 'app')}
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'something'))
+
+ # In case there are multiple already inside
+ self.__socket.shares = {
+ 'token': ('ANY', 'app'),
+ 'another': ('SAMEAPP', 'app')
+ }
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+
+ # Invalid inputs are rejected
+ self.assertRaises(ValueError, self.__socket.share_compatible, 'bad',
+ 'bad')
+
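The assertions above reduce to the following compatibility matrix (rows are the
share already recorded on the socket, columns the newly requested share; "same
app" means the application names must match):

    existing \ requested   NO    SAMEAPP        ANY
    (no shares yet)        yes   yes            yes
    NO                     no    no             no
    SAMEAPP                no    same app only  same app only
    ANY                    no    same app only  yes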
+class SocketCacheTest(Test):
+ """
+ Some tests for the isc.bind10.socket_cache.Cache.
+
+ This class, as well as being the testcase, pretends to be the
+ socket creator so it can hijack all the requests for sockets.
+ """
+ def setUp(self):
+ """
+ Creates the cache for tests with us being the socket creator.
+
+ Also creates some more variables for testing.
+ """
+ Test.setUp(self)
+ self.__cache = isc.bind10.socket_cache.Cache(self)
+ self.__address = IPAddr("192.0.2.1")
+ self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+ 1024, 42)
+ self.__get_socket_called = False
+
+ def test_init(self):
+ """
+ Checks the internals of the cache just after the creation.
+ """
+ self.assertEqual(self, self.__cache._creator)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(), self.__cache._live_tokens)
+
+ def get_socket(self, address, port, socktype):
+ """
+ Pretend to be a socket creator.
+
+ This expects to be called with the test's address, port 1024 and 'UDP'.
+
+ Returns 42 and notes down that it was called.
+ """
+ self.assertEqual(self.__address, address)
+ self.assertEqual(1024, port)
+ self.assertEqual('UDP', socktype)
+ self.__get_socket_called = True
+ return 42
+
+ def test_get_token_cached(self):
+ """
+ Check the behaviour of get_token when the requested socket is already
+ cached inside.
+ """
+ self.__cache._sockets = {
+ 'UDP': {'192.0.2.1': {1024: self.__socket}}
+ }
+ token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+ 'test')
+ # It didn't call get_socket
+ self.assertFalse(self.__get_socket_called)
+ # It returned something
+ self.assertIsNotNone(token)
+ # The token is both in the waiting sockets and the live tokens
+ self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ # The token got the new share to block any relevant queries
+ self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+ # The socket knows the token is waiting in it
+ self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+ # If we request one more, with incompatible share, it is rejected
+ self.assertRaises(isc.bind10.socket_cache.ShareError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+ # The internals are not changed, so the same checks
+ self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+ self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+ def test_get_token_uncached(self):
+ """
+ Check a new socket is created when a corresponding one is missing.
+ """
+ token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+ 'test')
+ # The get_socket was called
+ self.assertTrue(self.__get_socket_called)
+ # It returned something
+ self.assertIsNotNone(token)
+ # Get the socket and check it looks OK
+ socket = self.__cache._waiting_tokens[token]
+ self.assertEqual(self.__address, socket.address)
+ self.assertEqual(1024, socket.port)
+ self.assertEqual(42, socket.fileno)
+ self.assertEqual('UDP', socket.protocol)
+ # The socket is properly cached
+ self.assertEqual({
+ 'UDP': {'192.0.2.1': {1024: socket}}
+ }, self.__cache._sockets)
+ # The token is both in the waiting sockets and the live tokens
+ self.assertEqual({token: socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ # The token got the new share to block any relevant queries
+ self.assertEqual({token: ('ANY', 'test')}, socket.shares)
+ # The socket knows the token is waiting in it
+ self.assertEqual(set([token]), socket.waiting_tokens)
+
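For orientation, the hand-over these tests exercise is a three-step protocol. A
rough sketch, where the cache instance, the application id and the address are
placeholders rather than anything defined in this patch:

    import isc.bind10.socket_cache
    from isc.net.addr import IPAddr

    cache = isc.bind10.socket_cache.Cache(sock_creator)  # assumed creator object
    # 1. Reserve the socket (creating and caching it if needed) and get a token.
    token = cache.get_token('UDP', IPAddr('192.0.2.1'), 1024, 'ANY', 'myapp')
    # 2. The application identified by app_id later picks up the descriptor.
    fd = cache.get_socket(token, app_id)
    # 3. Once the application is done with it, release the socket again.
    cache.drop_socket(token)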
+ def test_get_token_excs(self):
+ """
+ Test that it is handled properly if the socket creator raises
+ some exceptions.
+ """
+ def raiseCreatorError(fatal):
+ raise isc.bind10.sockcreator.CreatorError('test error', fatal)
+ # First, fatal socket creator errors are passed through
+ self.get_socket = lambda addr, port, proto: raiseCreatorError(True)
+ self.assertRaises(isc.bind10.sockcreator.CreatorError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+ # And nonfatal are converted to SocketError
+ self.get_socket = lambda addr, port, proto: raiseCreatorError(False)
+ self.assertRaises(isc.bind10.socket_cache.SocketError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+
+ def test_get_socket(self):
+ """
+ Test that we can pick up a socket if we know a token.
+ """
+ token = "token"
+ app = 13
+ # No socket prepared there
+ self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+ # Not changed
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(), self.__cache._live_tokens)
+ # Prepare a token there
+ self.__socket.waiting_tokens = set([token])
+ self.__socket.shares = {token: ('ANY', 'app')}
+ self.__cache._waiting_tokens = {token: self.__socket}
+ self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+ self.__cache._live_tokens = set([token])
+ socket = self.__cache.get_socket(token, app)
+ # Received the fileno
+ self.assertEqual(42, socket)
+ # It moved from waiting to active ones
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({token: self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({13: set([token])}, self.__cache._active_apps)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({token: 13}, self.__socket.active_tokens)
+ # Trying to get it again fails
+ self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+
+ def test_drop_application(self):
+ """
+ Test that a drop_application calls drop_socket on all the sockets
+ held by the application.
+ """
+ sockets = set()
+ def drop_socket(token):
+ sockets.add(token)
+ # Mock the drop_socket so we know it is called
+ self.__cache.drop_socket = drop_socket
+ self.assertRaises(ValueError, self.__cache.drop_application,
+ 13)
+ self.assertEqual(set(), sockets)
+ # Put the tokens into active_apps. Nothing else should be touched
+ # by this call, so leave it alone.
+ self.__cache._active_apps = {
+ 1: set(['t1', 't2']),
+ 2: set(['t3'])
+ }
+ self.__cache.drop_application(1)
+ # We don't check _active_apps, as it would normally be cleaned by
+ # drop_socket, which we mocked out above.
+ self.assertEqual(set(['t1', 't2']), sockets)
+
+ def test_drop_socket(self):
+ """
+ Test the drop_socket call. It tests:
+ * That a socket that still has something to keep it alive is left alive
+ (both waiting and active).
+ * If not, it is deleted.
+ * That all bookkeeping data around it are properly removed.
+ * And, of course, the exception for an unknown token.
+ """
+ self.assertRaises(ValueError, self.__cache.drop_socket, "bad token")
+ self.__socket.active_tokens = {'t1': 1}
+ self.__socket.waiting_tokens = set(['t2'])
+ self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+ self.__cache._waiting_tokens = {'t2': self.__socket}
+ self.__cache._active_tokens = {'t1': self.__socket}
+ self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+ self.__cache._live_tokens = set(['t1', 't2'])
+ self.__cache._active_apps = {1: set(['t1'])}
+ # We can't drop what wasn't picked up yet
+ self.assertRaises(ValueError, self.__cache.drop_socket, 't2')
+ self.assertEqual({'t1': 1}, self.__socket.active_tokens)
+ self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+ self.assertEqual({'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')},
+ self.__socket.shares)
+ self.assertEqual({'t2': self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual({'t1': self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t1', 't2']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t1'])}, self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # If we drop this one, the socket survives because t2 is still waiting
+ # to be picked up
+ self.__cache.drop_socket('t1')
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+ self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t2']), self.__cache._live_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # Fill it again, now with two applications sharing the same socket
+ self.__socket.active_tokens = {'t1': 1, 't2': 2}
+ self.__socket.waiting_tokens = set()
+ self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+ self.__cache._waiting_tokens = {}
+ self.__cache._active_tokens = {
+ 't1': self.__socket,
+ 't2': self.__socket
+ }
+ self.__cache._live_tokens = set(['t1', 't2', 't3'])
+ self.assertEqual([], self._closes)
+ # We cheat here a little bit: t3 doesn't exist anywhere else, but
+ # we need to check the app isn't removed too soon, and it shouldn't
+ # matter anywhere else, so we just avoid the tiresome filling in
+ self.__cache._active_apps = {1: set(['t1', 't3']), 2: set(['t2'])}
+ # Drop it as t1. It should still live.
+ self.__cache.drop_socket('t1')
+ self.assertEqual({'t2': 2}, self.__socket.active_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({'t2': self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t3', 't2']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t3']), 2: set(['t2'])},
+ self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # Drop it again, from the other application. It should get removed
+ # and closed.
+ self.__cache.drop_socket('t2')
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(['t3']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t3'])}, self.__cache._active_apps)
+ # The cache no longer holds the socket. So when we drop our own
+ # reference, it should get closed.
+ self.__socket = None
+ self.assertEqual([42], self._closes)
+
+if __name__ == '__main__':
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/cc/data.py b/src/lib/python/isc/cc/data.py
index 76ef942..636e9a9 100644
--- a/src/lib/python/isc/cc/data.py
+++ b/src/lib/python/isc/cc/data.py
@@ -21,6 +21,7 @@
#
import json
+import re
class DataNotFoundError(Exception):
"""Raised if an identifier does not exist according to a spec file,
@@ -86,6 +87,13 @@ def split_identifier(identifier):
id_parts[:] = (value for value in id_parts if value != "")
return id_parts
+def identifier_has_list_index(identifier):
+ """Returns True if the given identifier string has at least one
+ list index (with [I], where I is a number"""
+ return (type(identifier) == str and
+ re.search("\[\d+\]", identifier) is not None)
+
+
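A couple of illustrative calls (the identifier strings are made up for the
example, not taken from a real spec):

    identifier_has_list_index("Init/components[0]/special")  # True
    identifier_has_list_index("Init/components/special")     # False
    identifier_has_list_index(42)                             # False, not a string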
def split_identifier_list_indices(identifier):
"""Finds list indexes in the given identifier, which are of the
format [integer].
diff --git a/src/lib/python/isc/cc/session.py b/src/lib/python/isc/cc/session.py
index f6b6265..33a47bd 100644
--- a/src/lib/python/isc/cc/session.py
+++ b/src/lib/python/isc/cc/session.py
@@ -72,7 +72,7 @@ class Session:
self._lname = None
self._closed = True
- def sendmsg(self, env, msg = None):
+ def sendmsg(self, env, msg=None):
with self._lock:
if self._closed:
raise SessionError("Session has been closed.")
@@ -82,15 +82,24 @@ class Session:
raise ProtocolError("Envelope too large")
if type(msg) == dict:
msg = isc.cc.message.to_wire(msg)
- self._socket.setblocking(1)
length = 2 + len(env);
- if msg:
+ if msg is not None:
length += len(msg)
- self._socket.send(struct.pack("!I", length))
- self._socket.send(struct.pack("!H", len(env)))
- self._socket.send(env)
- if msg:
- self._socket.send(msg)
+
+ # Build entire message.
+ data = struct.pack("!I", length)
+ data += struct.pack("!H", len(env))
+ data += env
+ if msg is not None:
+ data += msg
+
+ # Send it in blocking mode. On some systems send() may
+ # actually send only part of the data, so we need to repeat it
+ # until all the data have been sent out.
+ self._socket.setblocking(1)
+ while len(data) > 0:
+ cc = self._socket.send(data)
+ data = data[cc:]
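The framing assembled above is a 4-byte total length, a 2-byte envelope length,
the envelope and then the body. A small worked example (arbitrary byte strings,
just to show the arithmetic):

    import struct
    env, msg = b'e' * 20, b'm' * 100
    length = 2 + len(env) + len(msg)   # 122
    data = struct.pack("!I", length) + struct.pack("!H", len(env)) + env + msg
    assert len(data) == 4 + 2 + 20 + 100   # 126 bytes go out on the wire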
def recvmsg(self, nonblock = True, seq = None):
"""Reads a message. If nonblock is true, and there is no
diff --git a/src/lib/python/isc/cc/tests/.gitignore b/src/lib/python/isc/cc/tests/.gitignore
new file mode 100644
index 0000000..b7ac2ae
--- /dev/null
+++ b/src/lib/python/isc/cc/tests/.gitignore
@@ -0,0 +1 @@
+/cc_test
diff --git a/src/lib/python/isc/cc/tests/session_test.py b/src/lib/python/isc/cc/tests/session_test.py
index 772ed0c..e589085 100644
--- a/src/lib/python/isc/cc/tests/session_test.py
+++ b/src/lib/python/isc/cc/tests/session_test.py
@@ -29,6 +29,7 @@ class MySocket():
self.recvqueue = bytearray()
self.sendqueue = bytearray()
self._blocking = True
+ self.send_limit = None
def connect(self, to):
pass
@@ -40,7 +41,14 @@ class MySocket():
self._blocking = val
def send(self, data):
- self.sendqueue.extend(data);
+ # If the upper limit is specified, only "send" up to the specified
+ # limit
+ if self.send_limit is not None and len(data) > self.send_limit:
+ self.sendqueue.extend(data[0:self.send_limit])
+ return self.send_limit
+ else:
+ self.sendqueue.extend(data)
+ return len(data)
def readsent(self, length):
if length > len(self.sendqueue):
@@ -101,6 +109,17 @@ class MySocket():
def gettimeout(self):
return 0
+ def set_send_limit(self, limit):
+ '''Specify the upper limit of the transmittable data at once.
+
+ By default, the send() method of this class "sends" all given data.
+ If this method is called and its parameter is not None,
+ subsequent calls to send() will transmit at most the specified
+ amount of data. This can be used to emulate the situation where
+ send() on a real socket object results in a partial write.
+ '''
+ self.send_limit = limit
+
#
# We subclass the Session class we're testing here, only
# to override the __init__() method, which wants a socket,
@@ -157,6 +176,16 @@ class testSession(unittest.TestCase):
#print(sent)
#self.assertRaises(SessionError, sess.sendmsg, {}, {"hello": "a"})
+ def test_session_sendmsg_shortwrite(self):
+ sess = MySession()
+ # Specify the upper limit of the size that can be transmitted at
+ # a single send() call on the faked socket (10 is an arbitrary choice,
+ # just reasonably small).
+ sess._socket.set_send_limit(10)
+ sess.sendmsg({'to': 'someone', 'reply': 1}, {"hello": "a"})
+ # The complete message should still have been transmitted in the end.
+ sent = sess._socket.readsentmsg();
+
def recv_and_compare(self, session, bytes, env, msg):
"""Adds bytes to the recvqueue (which will be read by the
+ session object, and compare the resulting env and msg to
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index ef696fb..cda8b57 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -13,6 +13,7 @@ CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.pyc
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyo
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 11a13ec..703d196 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -38,6 +38,7 @@
from isc.cc import Session
from isc.config.config_data import ConfigData, MultiConfigData, BIND10_CONFIG_DATA_VERSION
+import isc.config.module_spec
import isc
from isc.util.file import path_search
import bind10_config
@@ -97,6 +98,7 @@ COMMAND_SET_CONFIG = "set_config"
COMMAND_GET_MODULE_SPEC = "get_module_spec"
COMMAND_MODULE_SPEC = "module_spec"
COMMAND_SHUTDOWN = "shutdown"
+COMMAND_MODULE_STOPPING = "stopping"
def parse_command(msg):
"""Parses what may be a command message. If it looks like one,
@@ -143,7 +145,9 @@ class ModuleCCSession(ConfigData):
callbacks are called when 'check_command' is called on the
ModuleCCSession"""
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session=None, handle_logging_config=True):
+ def __init__(self, spec_file_name, config_handler, command_handler,
+ cc_session=None, handle_logging_config=True,
+ socket_file = None):
"""Initialize a ModuleCCSession. This does *NOT* send the
specification and request the configuration yet. Use start()
for that once the ModuleCCSession has been initialized.
@@ -165,6 +169,12 @@ class ModuleCCSession(ConfigData):
logger manager when the logging configuration gets updated.
The module does not need to do anything except intializing
its loggers, and provide log messages. Defaults to true.
+
+ socket_file: If cc_session is None, this optional argument
+ specifies which socket file to use to connect to msgq. It
+ will be overridden by the environment variable
+ MSGQ_SOCKET_FILE. If None, and no environment variable is
+ set, the system default is used.
"""
module_spec = isc.config.module_spec_from_file(spec_file_name)
ConfigData.__init__(self, module_spec)
@@ -175,7 +185,7 @@ class ModuleCCSession(ConfigData):
self.set_command_handler(command_handler)
if not cc_session:
- self._session = Session()
+ self._session = Session(socket_file)
else:
self._session = cc_session
self._session.group_subscribe(self._module_name, "*")
@@ -202,6 +212,24 @@ class ModuleCCSession(ConfigData):
self.__send_spec()
self.__request_config()
+ def send_stopping(self):
+ """Sends a 'stopping' message to the configuration manager. This
+ message is just an FYI, and no response is expected. Any errors
+ when sending this message (for instance if the msgq session has
+ previously been closed) are logged, but ignored."""
+ # create_command could raise an exception as well, but except for
+ # out of memory related errors, these should all be programming
+ # failures and are not caught
+ msg = create_command(COMMAND_MODULE_STOPPING,
+ self.get_module_spec().get_full_spec())
+ try:
+ self._session.group_sendmsg(msg, "ConfigManager")
+ except Exception as se:
+ # If the session was previously closed, obviously trying to send
+ # a message fails. (TODO: check if session is open so we can
+ # error on real problems?)
+ logger.error(CONFIG_SESSION_STOPPING_FAILED, se)
+
def get_socket(self):
"""Returns the socket from the command channel session. This
should *only* be used for select() loops to see if there
@@ -300,43 +328,97 @@ class ModuleCCSession(ConfigData):
and return an answer created with create_answer()"""
self._command_handler = command_handler
- def add_remote_config(self, spec_file_name, config_update_callback = None):
- """Gives access to the configuration of a different module.
- These remote module options can at this moment only be
- accessed through get_remote_config_value(). This function
- also subscribes to the channel of the remote module name
- to receive the relevant updates. It is not possible to
- specify your own handler for this right now.
- start() must have been called on this CCSession
- prior to the call to this method.
- Returns the name of the module."""
- module_spec = isc.config.module_spec_from_file(spec_file_name)
+ def _add_remote_config_internal(self, module_spec,
+ config_update_callback=None):
+ """The guts of add_remote_config and add_remote_config_by_name"""
module_cfg = ConfigData(module_spec)
module_name = module_spec.get_module_name()
+
self._session.group_subscribe(module_name)
# Get the current config for that module now
seq = self._session.group_sendmsg(create_command(COMMAND_GET_CONFIG, { "module_name": module_name }), "ConfigManager")
try:
- answer, env = self._session.group_recvmsg(False, seq)
+ answer, _ = self._session.group_recvmsg(False, seq)
except isc.cc.SessionTimeout:
raise ModuleCCSessionError("No answer from ConfigManager when "
"asking about Remote module " +
module_name)
+ call_callback = False
if answer:
rcode, value = parse_answer(answer)
if rcode == 0:
- if value != None and module_spec.validate_config(False, value):
- module_cfg.set_local_config(value)
- if config_update_callback is not None:
- config_update_callback(value, module_cfg)
+ if value != None:
+ if module_spec.validate_config(False, value):
+ module_cfg.set_local_config(value)
+ call_callback = True
+ else:
+ raise ModuleCCSessionError("Bad config data for " +
+ module_name + ": " +
+ str(value))
+ else:
+ raise ModuleCCSessionError("Failure requesting remote " +
+ "configuration data for " +
+ module_name)
# all done, add it
self._remote_module_configs[module_name] = module_cfg
self._remote_module_callbacks[module_name] = config_update_callback
+ if call_callback and config_update_callback is not None:
+ config_update_callback(value, module_cfg)
+
+ def add_remote_config_by_name(self, module_name,
+ config_update_callback=None):
+ """
+ This does the same as add_remote_config, but you provide the module name
+ instead of the name of the spec file.
+ """
+ seq = self._session.group_sendmsg(create_command(COMMAND_GET_MODULE_SPEC,
+ { "module_name":
+ module_name }),
+ "ConfigManager")
+ try:
+ answer, env = self._session.group_recvmsg(False, seq)
+ except isc.cc.SessionTimeout:
+ raise ModuleCCSessionError("No answer from ConfigManager when " +
+ "asking about for spec of Remote " +
+ "module " + module_name)
+ if answer:
+ rcode, value = parse_answer(answer)
+ if rcode == 0:
+ module_spec = isc.config.module_spec.ModuleSpec(value)
+ if module_spec.get_module_name() != module_name:
+ raise ModuleCCSessionError("Module name mismatch: " +
+ module_name + " and " +
+ module_spec.get_module_name())
+ self._add_remote_config_internal(module_spec,
+ config_update_callback)
+ else:
+ raise ModuleCCSessionError("Error code " + str(rcode) +
+ "when asking for module spec of " +
+ module_name)
+ else:
+ raise ModuleCCSessionError("No answer when asking for module " +
+ "spec of " + module_name)
+ # Just to be consistent with the add_remote_config
return module_name
-
+
+ def add_remote_config(self, spec_file_name, config_update_callback=None):
+ """Gives access to the configuration of a different module.
+ These remote module options can at this moment only be
+ accessed through get_remote_config_value(). This function
+ also subscribes to the channel of the remote module name
+ to receive the relevant updates. It is not possible to
+ specify your own handler for this right now, but you can
+ specify a callback that is called after a change has happened.
+ start() must have been called on this CCSession
+ prior to the call to this method.
+ Returns the name of the module."""
+ module_spec = isc.config.module_spec_from_file(spec_file_name)
+ self._add_remote_config_internal(module_spec, config_update_callback)
+ return module_spec.get_module_name()
+
def remove_remote_config(self, module_name):
"""Removes the remote configuration access for this module"""
if module_name in self._remote_module_configs:
@@ -363,7 +445,7 @@ class ModuleCCSession(ConfigData):
except isc.cc.SessionTimeout:
# TODO: log an error?
pass
-
+
def __request_config(self):
"""Asks the configuration manager for the current configuration, and call the config handler if set.
Raises a ModuleCCSessionError if there is no answer from the configuration manager"""
@@ -402,30 +484,38 @@ class UIModuleCCSession(MultiConfigData):
passed must have send_GET and send_POST functions"""
MultiConfigData.__init__(self)
self._conn = conn
- self.request_specifications()
- self.request_current_config()
+ self.update_specs_and_config()
def request_specifications(self):
- """Request the module specifications from b10-cmdctl"""
- # this step should be unnecessary but is the current way cmdctl returns stuff
- # so changes are needed there to make this clean (we need a command to simply get the
- # full specs for everything, including commands etc, not separate gets for that)
+ """Clears the current list of specifications, and requests a new
+ list from b10-cmdctl. As other actions may have caused modules
+ to be stopped, or new modules to be added, this is expected to
+ be run after each interaction (at this moment). It is usually
+ also combined with request_current_config(). For that reason,
+ we provide update_specs_and_config() which calls both."""
specs = self._conn.send_GET('/module_spec')
+ self.clear_specifications()
for module in specs.keys():
self.set_specification(isc.config.ModuleSpec(specs[module]))
- def update_specs_and_config(self):
- self.request_specifications()
- self.request_current_config()
-
def request_current_config(self):
"""Requests the current configuration from the configuration
- manager through b10-cmdctl, and stores those as CURRENT"""
+ manager through b10-cmdctl, and stores those as CURRENT. This
+ does not modify any local changes; it just updates to the current
+ state of the server itself."""
config = self._conn.send_GET('/config_data')
if 'version' not in config or config['version'] != BIND10_CONFIG_DATA_VERSION:
raise ModuleCCSessionError("Bad config version")
self._set_current_config(config)
+ def update_specs_and_config(self):
+ """Convenience function to both clear and update the known list of
+ module specifications, and update the current configuration on
+ the server side. There are a few cases where the caller might only
+ want to run one of these tasks, but often they are both needed."""
+ self.request_specifications()
+ self.request_current_config()
+
def _add_value_to_list(self, identifier, value, module_spec):
cur_list, status = self.get_value(identifier)
if not cur_list:
@@ -443,9 +533,9 @@ class UIModuleCCSession(MultiConfigData):
cur_list.append(value)
self.set_value(identifier, cur_list)
else:
- raise isc.cc.data.DataAlreadyPresentError(value +
+ raise isc.cc.data.DataAlreadyPresentError(str(value) +
" already in "
- + identifier)
+ + str(identifier))
def _add_value_to_named_set(self, identifier, value, item_value):
if type(value) != str:
@@ -466,8 +556,8 @@ class UIModuleCCSession(MultiConfigData):
self.set_value(identifier, cur_map)
else:
raise isc.cc.data.DataAlreadyPresentError(value +
- " already in "
- + identifier)
+ " already in " +
+ identifier)
def add_value(self, identifier, value_str = None, set_value_str = None):
"""Add a value to a configuration list. Raises a DataTypeError
@@ -535,6 +625,7 @@ class UIModuleCCSession(MultiConfigData):
cur_map = {}
if value in cur_map:
del cur_map[value]
+ self.set_value(identifier, cur_map)
else:
raise isc.cc.data.DataNotFoundError(value + " not found in named_set " + str(identifier))
@@ -573,7 +664,6 @@ class UIModuleCCSession(MultiConfigData):
# answer is either an empty dict (on success), or one
# containing errors
if answer == {}:
- self.request_current_config()
self.clear_local_changes()
elif "error" in answer:
raise ModuleCCSessionError("Error: " + str(answer["error"]) + "\n" + "Configuration not committed")
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 9996a19..aa0547b 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -81,6 +81,7 @@ class ConfigManagerData:
and stop loading the system.
"""
config = ConfigManagerData(data_path, file_name)
+ logger.info(CFGMGR_CONFIG_FILE, config.db_filename)
file = None
try:
file = open(config.db_filename, 'r')
@@ -117,12 +118,13 @@ class ConfigManagerData:
if file:
file.close();
return config
-
+
def write_to_file(self, output_file_name = None):
"""Writes the current configuration data to a file. If
output_file_name is not specified, the file used in
read_from_file is used."""
filename = None
+
try:
file = tempfile.NamedTemporaryFile(mode='w',
prefix="b10-config.db.",
@@ -147,6 +149,27 @@ class ConfigManagerData:
# Ok if we really can't delete it anymore, leave it
pass
+ def rename_config_file(self, old_file_name=None, new_file_name=None):
+ """Renames the given configuration file to the given new file name,
+ if it exists. If it does not exist, nothing happens.
+ If old_file_name is None (default), the file used in
+ read_from_file is used. If new_file_name is None (default), the
+ file old_file_name appended with .bak is used. If that file exists
+ already, .1 is appended. If that file exists, .2 is appended, etc.
+ """
+ if old_file_name is None:
+ old_file_name = self.db_filename
+ if new_file_name is None:
+ new_file_name = old_file_name + ".bak"
+ if os.path.exists(new_file_name):
+ i = 1
+ while os.path.exists(new_file_name + "." + str(i)):
+ i += 1
+ new_file_name = new_file_name + "." + str(i)
+ if os.path.exists(old_file_name):
+ logger.info(CFGMGR_BACKED_UP_CONFIG_FILE, old_file_name, new_file_name)
+ os.rename(old_file_name, new_file_name)
+
def __eq__(self, other):
"""Returns True if the data contained is equal. data_path and
db_filename may be different."""
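
rename_config_file() never clobbers an earlier backup: it appends .bak, then .bak.1, .bak.2, and so on until a free name is found. The naming step on its own, as a small sketch (next_backup_name is an illustrative helper, not part of the commit):

    import os

    def next_backup_name(path):
        """Pick <path>.bak, or <path>.bak.N for the first free N (sketch only)."""
        candidate = path + ".bak"
        if os.path.exists(candidate):
            i = 1
            while os.path.exists(candidate + "." + str(i)):
                i += 1
            candidate = candidate + "." + str(i)
        return candidate
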
@@ -162,14 +185,16 @@ class ConfigManager:
channel session. If not, a new session will be created.
The ability to specify a custom session is for testing purposes
and should not be needed for normal usage."""
- def __init__(self, data_path, database_filename, session=None):
+ def __init__(self, data_path, database_filename, session=None,
+ clear_config=False):
"""Initialize the configuration manager. The data_path string
is the path to the directory where the configuration is
stored (in <data_path>/<database_filename> or in
- <database_filename>, if it is absolute). The dabase_filename
+ <database_filename>, if it is absolute). The database_filename
is the config file to load. Session is an optional
cc-channel session. If this is not given, a new one is
- created."""
+ created. If clear_config is True, the configuration file is
+ renamed and a new one is created."""
self.data_path = data_path
self.database_filename = database_filename
self.module_specs = {}
@@ -178,6 +203,8 @@ class ConfigManager:
# of some other process
self.virtual_modules = {}
self.config = ConfigManagerData(data_path, database_filename)
+ if clear_config:
+ self.config.rename_config_file()
if session:
self.cc = session
else:
@@ -202,7 +229,7 @@ class ConfigManager:
def notify_boss(self):
"""Notifies the Boss module that the Config Manager is running"""
- self.cc.group_sendmsg({"running": "configmanager"}, "Boss")
+ self.cc.group_sendmsg({"running": "ConfigManager"}, "Boss")
def set_module_spec(self, spec):
"""Adds a ModuleSpec"""
@@ -291,12 +318,12 @@ class ConfigManager:
# ok, just start with an empty config
self.config = ConfigManagerData(self.data_path,
self.database_filename)
-
+
def write_config(self):
"""Write the current configuration to the file specificied at init()"""
self.config.write_to_file()
- def _handle_get_module_spec(self, cmd):
+ def __handle_get_module_spec(self, cmd):
"""Private function that handles the 'get_module_spec' command"""
answer = {}
if cmd != None:
@@ -317,7 +344,7 @@ class ConfigManager:
answer = ccsession.create_answer(0, self.get_module_spec())
return answer
- def _handle_get_config_dict(self, cmd):
+ def __handle_get_config_dict(self, cmd):
"""Private function that handles the 'get_config' command
where the command has been checked to be a dict"""
if 'module_name' in cmd and cmd['module_name'] != '':
@@ -331,17 +358,17 @@ class ConfigManager:
else:
return ccsession.create_answer(1, "Bad module_name in get_config command")
- def _handle_get_config(self, cmd):
+ def __handle_get_config(self, cmd):
"""Private function that handles the 'get_config' command"""
if cmd != None:
if type(cmd) == dict:
- return self._handle_get_config_dict(cmd)
+ return self.__handle_get_config_dict(cmd)
else:
return ccsession.create_answer(1, "Bad get_config command, argument not a dict")
else:
return ccsession.create_answer(0, self.config.data)
- def _handle_set_config_module(self, module_name, cmd):
+ def __handle_set_config_module(self, module_name, cmd):
# the answer comes (or does not come) from the relevant module
# so we need a variable to see if we got it
answer = None
@@ -404,7 +431,7 @@ class ConfigManager:
self.config.data = old_data
return answer
- def _handle_set_config_all(self, cmd):
+ def __handle_set_config_all(self, cmd):
old_data = copy.deepcopy(self.config.data)
got_error = False
err_list = []
@@ -412,7 +439,7 @@ class ConfigManager:
# sets, so we simply call set_config_module for each of those
for module in cmd:
if module != "version":
- answer = self._handle_set_config_module(module, cmd[module])
+ answer = self.__handle_set_config_module(module, cmd[module])
if answer == None:
got_error = True
err_list.append("No answer message from " + module)
@@ -431,37 +458,58 @@ class ConfigManager:
self.config.data = old_data
return ccsession.create_answer(1, " ".join(err_list))
- def _handle_set_config(self, cmd):
+ def __handle_set_config(self, cmd):
"""Private function that handles the 'set_config' command"""
answer = None
if cmd == None:
return ccsession.create_answer(1, "Wrong number of arguments")
if len(cmd) == 2:
- answer = self._handle_set_config_module(cmd[0], cmd[1])
+ answer = self.__handle_set_config_module(cmd[0], cmd[1])
elif len(cmd) == 1:
- answer = self._handle_set_config_all(cmd[0])
+ answer = self.__handle_set_config_all(cmd[0])
else:
answer = ccsession.create_answer(1, "Wrong number of arguments")
if not answer:
answer = ccsession.create_answer(1, "No answer message from " + cmd[0])
-
+
return answer
- def _handle_module_spec(self, spec):
+ def __handle_module_spec(self, spec):
"""Private function that handles the 'module_spec' command"""
# todo: validate? (no direct access to spec as
# todo: use ModuleSpec class
# todo: error checking (like keyerrors)
answer = {}
self.set_module_spec(spec)
-
- # We should make one general 'spec update for module' that
- # passes both specification and commands at once
+ self._send_module_spec_to_cmdctl(spec.get_module_name(),
+ spec.get_full_spec())
+ return ccsession.create_answer(0)
+
+ def __handle_module_stopping(self, arg):
+ """Private function that handles a 'stopping' command;
+ The argument is of the form { 'module_name': <name> }.
+ If the module is known, it is removed from the known list,
+ and a message is sent to the Cmdctl channel to remove it as well.
+ If it is unknown, the message is ignored."""
+ if arg['module_name'] in self.module_specs:
+ del self.module_specs[arg['module_name']]
+ self._send_module_spec_to_cmdctl(arg['module_name'], None)
+ # This command is not expected to be answered
+ return None
+
+ def _send_module_spec_to_cmdctl(self, module_name, spec):
+ """Sends the given module spec for the given module name to Cmdctl.
+ Parameters:
+ module_name: A string with the name of the module
+ spec: dict containing full module specification, as returned by
+ ModuleSpec.get_full_spec(). This argument may also be None,
+ in which case it signals Cmdctl to remove said module from
+ its list.
+ No response from Cmdctl is expected."""
spec_update = ccsession.create_command(ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
- [ spec.get_module_name(), spec.get_full_spec() ])
+ [ module_name, spec ])
self.cc.group_sendmsg(spec_update, "Cmdctl")
- return ccsession.create_answer(0)
def handle_msg(self, msg):
"""Handle a command from the cc channel to the configuration manager"""
@@ -473,17 +521,19 @@ class ConfigManager:
elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
answer = ccsession.create_answer(0, self.get_statistics_spec())
elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
- answer = self._handle_get_module_spec(arg)
+ answer = self.__handle_get_module_spec(arg)
elif cmd == ccsession.COMMAND_GET_CONFIG:
- answer = self._handle_get_config(arg)
+ answer = self.__handle_get_config(arg)
elif cmd == ccsession.COMMAND_SET_CONFIG:
- answer = self._handle_set_config(arg)
+ answer = self.__handle_set_config(arg)
+ elif cmd == ccsession.COMMAND_MODULE_STOPPING:
+ answer = self.__handle_module_stopping(arg)
elif cmd == ccsession.COMMAND_SHUTDOWN:
self.running = False
answer = ccsession.create_answer(0)
elif cmd == ccsession.COMMAND_MODULE_SPEC:
try:
- answer = self._handle_module_spec(isc.config.ModuleSpec(arg))
+ answer = self.__handle_module_spec(isc.config.ModuleSpec(arg))
except isc.config.ModuleSpecError as dde:
answer = ccsession.create_answer(1, "Error in data definition: " + str(dde))
else:
@@ -491,7 +541,7 @@ class ConfigManager:
else:
answer = ccsession.create_answer(1, "Unknown message format: " + str(msg))
return answer
-
+
def run(self):
"""Runs the configuration manager."""
self.running = True
@@ -507,4 +557,6 @@ class ConfigManager:
# not ask
if msg is not None and not 'result' in msg:
answer = self.handle_msg(msg);
- self.cc.group_reply(env, answer)
+ # Only respond if there actually is something to respond with
+ if answer is not None:
+ self.cc.group_reply(env, answer)
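
Two of the changes above cooperate: __handle_module_stopping() returns None because the 'stopping' command expects no reply, and run() now only calls group_reply() when there is an answer. A minimal sketch of that fire-and-forget convention (the function names and the 'Resolver' module are illustrative only):

    def handle_msg(cmd, arg, module_specs):
        """Dispatch one command; return None when no reply is expected."""
        if cmd == "stopping":
            module_specs.pop(arg.get("module_name"), None)
            return None                    # fire-and-forget: no answer
        return {"result": [1, "Unknown command: " + str(cmd)]}

    def reply_if_needed(send_reply, env, answer):
        # Mirror the run() change: only respond when there is an answer.
        if answer is not None:
            send_reply(env, answer)

    replies = []
    specs = {"Resolver": {}}
    answer = handle_msg("stopping", {"module_name": "Resolver"}, specs)
    reply_if_needed(lambda env, ans: replies.append(ans), {}, answer)
    assert specs == {} and replies == []   # module dropped, nothing sent back
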
diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes
index 61a63ed..8701db3 100644
--- a/src/lib/python/isc/config/cfgmgr_messages.mes
+++ b/src/lib/python/isc/config/cfgmgr_messages.mes
@@ -20,6 +20,12 @@ An older version of the configuration database has been found, from which
there was an automatic upgrade path to the current version. These changes
are now applied, and no action from the administrator is necessary.
+% CFGMGR_BACKED_UP_CONFIG_FILE Config file %1 was removed; a backup was made at %2
+BIND 10 has been started with the command to clear the configuration
+file. The existing file has been backed up (moved) to the given file
+name. A new configuration file will be created in the original location
+when necessary.
+
% CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2
The configuration manager sent a configuration update to a module, but
the module responded with an answer that could not be parsed. The answer
@@ -31,6 +37,10 @@ assumed to have failed, and will not be stored.
The configuration manager daemon was unable to connect to the messaging
system. The most likely cause is that msgq is not running.
+% CFGMGR_CONFIG_FILE Configuration manager starting with configuration file: %1
+The configuration manager is starting, reading and saving the configuration
+settings to the shown file.
+
% CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1
There was a problem reading the persistent configuration data as stored
on disk. The file may be corrupted, or it is of a version from where
@@ -54,4 +64,3 @@ configuration is not stored.
% CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the cfgmgr daemon. The
daemon will now shut down.
-
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index fabd37d..2bec4ab 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -23,11 +23,28 @@ two through the classes in ccsession)
import isc.cc.data
import isc.config.module_spec
import ast
+import copy
class ConfigDataError(Exception): pass
BIND10_CONFIG_DATA_VERSION = 2
+# Helper functions
+def spec_part_is_list(spec_part):
+ """Returns True if the given spec_part is a dict that contains a
+ list specification, and False otherwise."""
+ return (type(spec_part) == dict and 'list_item_spec' in spec_part)
+
+def spec_part_is_map(spec_part):
+ """Returns True if the given spec_part is a dict that contains a
+ map specification, and False otherwise."""
+ return (type(spec_part) == dict and 'map_item_spec' in spec_part)
+
+def spec_part_is_named_set(spec_part):
+ """Returns True if the given spec_part is a dict that contains a
+ named_set specification, and False otherwise."""
+ return (type(spec_part) == dict and 'named_set_item_spec' in spec_part)
+
def check_type(spec_part, value):
"""Does nothing if the value is of the correct type given the
specification part relevant for the value. Raises an
@@ -93,14 +110,17 @@ def convert_type(spec_part, value):
return ret
elif data_type == "map":
- map = ast.literal_eval(value)
- if type(map) == dict:
- # todo: check types of map contents too
- return map
- else:
- raise isc.cc.data.DataTypeError(
- "Value in convert_type not a string "
- "specifying a dict")
+ try:
+ map = ast.literal_eval(value)
+ if type(map) == dict:
+ # todo: check types of map contents too
+ return map
+ else:
+ raise isc.cc.data.DataTypeError(
+ "Value in convert_type not a string "
+ "specifying a dict")
+ except SyntaxError as se:
+ raise isc.cc.data.DataTypeError("Error parsing map: " + str(se))
else:
return value
except ValueError as err:
@@ -112,9 +132,9 @@ def _get_map_or_list(spec_part):
"""Returns the list or map specification if this is a list or a
map specification part. If not, returns the given spec_part
itself"""
- if "map_item_spec" in spec_part:
+ if spec_part_is_map(spec_part):
return spec_part["map_item_spec"]
- elif "list_item_spec" in spec_part:
+ elif spec_part_is_list(spec_part):
return spec_part["list_item_spec"]
else:
return spec_part
@@ -134,13 +154,13 @@ def _find_spec_part_single(cur_spec, id_part):
# list or a map, which is internally represented by a dict with
# an element 'map_item_spec', a dict with an element 'list_item_spec',
# or a list (when it is the 'main' config_data element of a module).
- if type(cur_spec) == dict and 'map_item_spec' in cur_spec.keys():
+ if spec_part_is_map(cur_spec):
for cur_spec_item in cur_spec['map_item_spec']:
if cur_spec_item['item_name'] == id:
return cur_spec_item
# not found
raise isc.cc.data.DataNotFoundError(id + " not found")
- elif type(cur_spec) == dict and 'list_item_spec' in cur_spec.keys():
+ elif spec_part_is_list(cur_spec):
if cur_spec['item_name'] == id:
return cur_spec['list_item_spec']
# not found
@@ -156,9 +176,22 @@ def _find_spec_part_single(cur_spec, id_part):
else:
raise isc.cc.data.DataNotFoundError("Not a correct config specification")
-def find_spec_part(element, identifier):
+def find_spec_part(element, identifier, strict_identifier = True):
"""find the data definition for the given identifier
- returns either a map with 'item_name' etc, or a list of those"""
+ returns either a map with 'item_name' etc, or a list of those
+ Parameters:
+ element: The specification element to start the search in
+ identifier: The element to find (relative to element above)
+ strict_identifier: If True (the default), additional checking occurs.
+ Currently the only check is whether a list index is
+ specified (except for the last part of the
+ identifier)
+ Raises a DataNotFoundError if the data is not found, or if
+ strict_identifier is True and any non-final identifier parts
+ (i.e. before the last /) identify a list element and do not contain
+ an index.
+ Returns the spec element identified by the given identifier.
+ """
if identifier == "":
return element
id_parts = identifier.split("/")
@@ -171,10 +204,15 @@ def find_spec_part(element, identifier):
# always want the 'full' spec of the item
for id_part in id_parts[:-1]:
cur_el = _find_spec_part_single(cur_el, id_part)
+ if strict_identifier and spec_part_is_list(cur_el) and\
+ not isc.cc.data.identifier_has_list_index(id_part):
+ raise isc.cc.data.DataNotFoundError(id_part +
+ " is a list and needs an index")
cur_el = _get_map_or_list(cur_el)
cur_el = _find_spec_part_single(cur_el, id_parts[-1])
- return cur_el
+ # Due to the raw datatypes we use, it is safer to return a deep copy here
+ return copy.deepcopy(cur_el)
def spec_name_list(spec, prefix="", recurse=False):
"""Returns a full list of all possible item identifiers in the
@@ -184,12 +222,12 @@ def spec_name_list(spec, prefix="", recurse=False):
if prefix != "" and not prefix.endswith("/"):
prefix += "/"
if type(spec) == dict:
- if 'map_item_spec' in spec:
+ if spec_part_is_map(spec):
for map_el in spec['map_item_spec']:
name = map_el['item_name']
if map_el['item_type'] == 'map':
name += "/"
- if recurse and 'map_item_spec' in map_el:
+ if recurse and spec_part_is_map(map_el):
result.extend(spec_name_list(map_el['map_item_spec'], prefix + map_el['item_name'], recurse))
else:
result.append(prefix + name)
@@ -244,7 +282,12 @@ class ConfigData:
def get_default_value(self, identifier):
"""Returns the default from the specification, or None if there
is no default"""
- spec = find_spec_part(self.specification.get_config_spec(), identifier)
+ # We are searching for the default value, so we can set
+ # strict_identifier to False (in fact, we need to: we may not know
+ # some list indices, or they may not exist, which is exactly why we
+ # are looking for a default value here).
+ spec = find_spec_part(self.specification.get_config_spec(),
+ identifier, False)
if spec and 'item_default' in spec:
return spec['item_default']
else:
@@ -313,6 +356,10 @@ class MultiConfigData:
self._current_config = {}
self._local_changes = {}
+ def clear_specifications(self):
+ """Remove all known module specifications"""
+ self._specifications = {}
+
def set_specification(self, spec):
"""Add or update a ModuleSpec. Raises a ConfigDataError is spec is not a ModuleSpec"""
if type(spec) != isc.config.ModuleSpec:
@@ -373,6 +420,14 @@ class MultiConfigData:
manager or the modules."""
return self._local_changes
+ def set_local_changes(self, new_local_changes):
+ """Sets the entire set of local changes, used when reverting
+ changes done automatically in case there was a problem (e.g.
+ when executing commands from a script that fails halfway
+ through).
+ """
+ self._local_changes = new_local_changes
+
def clear_local_changes(self):
"""Reverts all local changes"""
self._local_changes = {}
@@ -515,7 +570,7 @@ class MultiConfigData:
return value, self.CURRENT
if default:
value = self.get_default_value(identifier)
- if value != None:
+ if value is not None:
return value, self.DEFAULT
return None, self.NONE
@@ -534,8 +589,10 @@ class MultiConfigData:
if item_type == "list" and (all or first):
spec_part_list = spec_part['list_item_spec']
list_value, status = self.get_value(identifier)
+ # If not set, and there is no default, lists will show up as 'None',
+ # but it's better to treat that as an empty list here
if list_value is None:
- raise isc.cc.data.DataNotFoundError(identifier + " not found")
+ list_value = []
if type(list_value) != list:
# the identifier specified a single element
@@ -603,7 +660,7 @@ class MultiConfigData:
Throws DataNotFoundError if the identifier is bad
"""
result = []
- if not identifier:
+ if not identifier or identifier == "/":
# No identifier, so we need the list of current modules
for module in self._specifications.keys():
if all:
@@ -615,8 +672,11 @@ class MultiConfigData:
entry = _create_value_map_entry(module, 'module', None)
result.append(entry)
else:
- if identifier[0] == '/':
+ # Strip off start and end slashes, if they are there
+ if len(identifier) > 0 and identifier[0] == '/':
identifier = identifier[1:]
+ if len(identifier) > 0 and identifier[-1] == '/':
+ identifier = identifier[:-1]
module, sep, id = identifier.partition('/')
spec = self.get_module_spec(module)
if spec:
@@ -624,6 +684,16 @@ class MultiConfigData:
self._append_value_item(result, spec_part, identifier, all, True)
return result
+ def unset(self, identifier):
+ """
+ Reset the value to default.
+ """
+ spec_part = self.find_spec_part(identifier)
+ if spec_part is not None:
+ isc.cc.data.unset(self._local_changes, identifier)
+ else:
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
+
def set_value(self, identifier, value):
"""Set the local value at the given identifier to value. If
there is a specification for the given identifier, the type
@@ -649,7 +719,11 @@ class MultiConfigData:
id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
cur_value, status = self.get_value(cur_id_part + id)
# Check if the value was there in the first place
- if status == MultiConfigData.NONE and cur_id_part != "/":
+ # If we are at the final element, we do not care whether we found
+ # it, since if we have reached this point and it did not exist,
+ # it was apparently an optional value without a default.
+ if status == MultiConfigData.NONE and cur_id_part != "/" and\
+ cur_id_part + id != identifier:
raise isc.cc.data.DataNotFoundError(id_part +
" not found in " +
cur_id_part)
@@ -668,6 +742,15 @@ class MultiConfigData:
cur_id_part + id,
cur_value)
cur_id_part = cur_id_part + id_part + "/"
+
+ # We also need to copy to local if we are changing a named set,
+ # so that the other items in the set do not disappear
+ if spec_part_is_named_set(self.find_spec_part(cur_id_part)):
+ ns_value, ns_status = self.get_value(cur_id_part)
+ if ns_status != MultiConfigData.LOCAL:
+ isc.cc.data.set(self._local_changes,
+ cur_id_part,
+ ns_value)
isc.cc.data.set(self._local_changes, identifier, value)
def _get_list_items(self, item_name):
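
find_spec_part() now returns copy.deepcopy(cur_el) because spec parts are plain dicts and lists; handing out the stored object would let callers mutate the specification in place. A tiny sketch of the hazard being avoided (the "port" item below is made up for illustration):

    import copy

    _stored_spec = {"item_name": "port", "item_type": "integer", "item_default": 53}

    def find_spec_part(spec):
        # Returning a deep copy keeps callers from mutating the stored spec.
        return copy.deepcopy(spec)

    part = find_spec_part(_stored_spec)
    part["item_default"] = 9999                # caller-side change...
    assert _stored_spec["item_default"] == 53  # ...does not leak back
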
diff --git a/src/lib/python/isc/config/config_messages.mes b/src/lib/python/isc/config/config_messages.mes
index c52efb4..1fcf597 100644
--- a/src/lib/python/isc/config/config_messages.mes
+++ b/src/lib/python/isc/config/config_messages.mes
@@ -21,13 +21,19 @@
# have that at this moment. So when adding a message, make sure that
# the name is not already used in src/lib/config/config_messages.mes
-% CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1
-There was a logging configuration update, but the internal validator
-for logging configuration found that it contained errors. The errors
-are shown, and the update is ignored.
-
% CONFIG_GET_FAILED error getting configuration from cfgmgr: %1
The configuration manager returned an error response when the module
requested its configuration. The full error message answer from the
configuration manager is appended to the log error.
+% CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+
+% CONFIG_SESSION_STOPPING_FAILED error sending stopping message: %1
+There was a problem when sending a message signaling that the module using
+this CCSession is stopping. This message is sent so that the rest of the
+system is aware that the module is no longer running. Apart from logging
+this message, the error itself is ignored, and the ModuleCCSession is
+still stopped. The specific exception message is printed.
diff --git a/src/lib/python/isc/config/tests/.gitignore b/src/lib/python/isc/config/tests/.gitignore
new file mode 100644
index 0000000..52a9c5e
--- /dev/null
+++ b/src/lib/python/isc/config/tests/.gitignore
@@ -0,0 +1 @@
+/config_test
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 6670ee7..cb59e6f 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -22,6 +22,7 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index 1c63957..d1060bf 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -250,6 +250,18 @@ class TestModuleCCSession(unittest.TestCase):
self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
fake_session.get_message('ConfigManager', None))
+ def test_stop(self):
+ fake_session = FakeModuleCCSession()
+ self.assertFalse("Spec1" in fake_session.subscriptions)
+ mccs = self.create_session("spec1.spec", None, None, fake_session)
+ self.assertTrue("Spec1" in fake_session.subscriptions)
+
+ self.assertEqual(len(fake_session.message_queue), 0)
+ mccs.send_stopping()
+ self.assertEqual(len(fake_session.message_queue), 1)
+ self.assertEqual({'command': ['stopping', {'module_name': 'Spec1'}]},
+ fake_session.get_message('ConfigManager', None))
+
def test_get_socket(self):
fake_session = FakeModuleCCSession()
mccs = self.create_session("spec1.spec", None, None, fake_session)
@@ -476,45 +488,6 @@ class TestModuleCCSession(unittest.TestCase):
self.assertEqual({'result': [0]},
fake_session.get_message('Spec2', None))
- def test_check_command_without_recvmsg_remote_module(self):
- "copied from test_check_command3"
- fake_session = FakeModuleCCSession()
- mccs = self.create_session("spec1.spec", None, None, fake_session)
- mccs.set_config_handler(self.my_config_handler_ok)
- self.assertEqual(len(fake_session.message_queue), 0)
-
- fake_session.group_sendmsg(None, 'Spec2')
- rmodname = mccs.add_remote_config(self.spec_file("spec2.spec"))
- print(fake_session.message_queue)
- self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
- fake_session.get_message('ConfigManager', None))
- self.assertEqual(len(fake_session.message_queue), 0)
-
- cmd = isc.config.ccsession.create_command(isc.config.ccsession.COMMAND_CONFIG_UPDATE, { 'Spec2': { 'item1': 2 }})
- env = { 'group':'Spec2', 'from':None }
- self.assertEqual(len(fake_session.message_queue), 0)
- mccs.check_command_without_recvmsg(cmd, env)
- self.assertEqual(len(fake_session.message_queue), 0)
-
- def test_check_command_without_recvmsg_remote_module2(self):
- "copied from test_check_command3"
- fake_session = FakeModuleCCSession()
- mccs = self.create_session("spec1.spec", None, None, fake_session)
- mccs.set_config_handler(self.my_config_handler_ok)
- self.assertEqual(len(fake_session.message_queue), 0)
-
- fake_session.group_sendmsg(None, 'Spec2')
- rmodname = mccs.add_remote_config(self.spec_file("spec2.spec"))
- self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
- fake_session.get_message('ConfigManager', None))
- self.assertEqual(len(fake_session.message_queue), 0)
-
- cmd = isc.config.ccsession.create_command(isc.config.ccsession.COMMAND_CONFIG_UPDATE, { 'Spec3': { 'item1': 2 }})
- env = { 'group':'Spec3', 'from':None }
- self.assertEqual(len(fake_session.message_queue), 0)
- mccs.check_command_without_recvmsg(cmd, env)
- self.assertEqual(len(fake_session.message_queue), 0)
-
def test_check_command_block_timeout(self):
"""Check it works if session has timeout and it sets it back."""
def cmd_check(mccs, session):
@@ -542,16 +515,65 @@ class TestModuleCCSession(unittest.TestCase):
mccs.set_command_handler(self.my_command_handler_ok)
self.assertRaises(WouldBlockForever, lambda: mccs.check_command(False))
- def test_remote_module(self):
+ # Now there's a group of tests testing both add_remote_config and
+ # add_remote_config_by_name. Since they are almost the same (they differ
+ # just in the parameter and that the second one asks one more question over
+ # the bus), the actual test code is shared.
+ #
+ # These three functions are helpers to ease writing them.
+ # To write a test, there need to be 3 functions. First, the function that
+ # does the actual test. It looks like:
+ # def _internal_test(self, function_lambda, param, fill_other_messages):
+ #
+ # The function_lambda provides the tested function when called on the
+ # ccsession. The param is the parameter to pass to the function (either
+ # the module name or the spec file name). The fill_other_messages fills
+ # needed messages (the answer containing the module spec in case of add by
+ # name, no messages in the case of adding by spec file) into the fake bus.
+ # So, the code would look like:
+ #
+ # * Create the fake session and tested ccsession object
+ # * function = function_lambda(ccsession object)
+ # * fill_other_messages(fake session)
+ # * Fill in answer to the get_module_config command
+ # * Test by calling function(param)
+ #
+ # Then you need two wrappers that do launch the tests. There are helpers
+ # for that, so you can just call:
+ # def test_by_spec(self)
+ # self._common_remote_module_test(self._internal_test)
+ # def test_by_name(self)
+ # self._common_remote_module_by_name_test(self._internal_test)
+ def _common_remote_module_test(self, internal_test):
+ internal_test(lambda ccs: ccs.add_remote_config,
+ self.spec_file("spec2.spec"),
+ lambda session: None)
+
+ def _prepare_spec_message(self, session, spec_name):
+ # It could have been one command, but the line would be way too long
+ # to even split it
+ spec_file = self.spec_file(spec_name)
+ spec = isc.config.module_spec_from_file(spec_file)
+ session.group_sendmsg({'result': [0, spec.get_full_spec()]}, "Spec1")
+
+ def _common_remote_module_by_name_test(self, internal_test):
+ internal_test(lambda ccs: ccs.add_remote_config_by_name, "Spec2",
+ lambda session: self._prepare_spec_message(session,
+ "spec2.spec"))
+
+ def _internal_remote_module(self, function_lambda, parameter,
+ fill_other_messages):
fake_session = FakeModuleCCSession()
mccs = self.create_session("spec1.spec", None, None, fake_session)
mccs.remove_remote_config("Spec2")
+ function = function_lambda(mccs)
self.assertRaises(ModuleCCSessionError, mccs.get_remote_config_value, "Spec2", "item1")
self.assertFalse("Spec2" in fake_session.subscriptions)
+ fill_other_messages(fake_session)
fake_session.group_sendmsg(None, 'Spec2')
- rmodname = mccs.add_remote_config(self.spec_file("spec2.spec"))
+ rmodname = function(parameter)
self.assertTrue("Spec2" in fake_session.subscriptions)
self.assertEqual("Spec2", rmodname)
self.assertRaises(isc.cc.data.DataNotFoundError, mccs.get_remote_config_value, rmodname, "asdf")
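
The comment block above describes the test-sharing scheme: one _internal_* method holds the assertions, and two thin wrappers feed it the by-spec-file and by-name variants. Reduced to its bare shape, with toy callables instead of ccsession objects (all names here are illustrative), the pattern looks like this:

    import unittest

    class SharedBodyExample(unittest.TestCase):
        # The assertions live once in an _internal_* method; thin test_*
        # wrappers supply the variant-specific callable, parameter and setup.
        def _internal_double(self, function_lambda, param, fill_other_messages):
            fill_other_messages()        # variant-specific setup (may be a no-op)
            function = function_lambda()
            self.assertEqual(param + param, function(param))

        def test_with_int(self):
            self._internal_double(lambda: (lambda x: 2 * x), 21, lambda: None)

        def test_with_str(self):
            self._internal_double(lambda: (lambda s: s * 2), "ab", lambda: None)

    if __name__ == '__main__':
        unittest.main()
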
@@ -563,36 +585,77 @@ class TestModuleCCSession(unittest.TestCase):
self.assertFalse("Spec2" in fake_session.subscriptions)
self.assertRaises(ModuleCCSessionError, mccs.get_remote_config_value, "Spec2", "item1")
- # test if unsubscription is alse sent when object is deleted
+ # test if unsubscription is also sent when object is deleted
+ fill_other_messages(fake_session)
fake_session.group_sendmsg({'result' : [0]}, 'Spec2')
- rmodname = mccs.add_remote_config(self.spec_file("spec2.spec"))
+ rmodname = function(parameter)
self.assertTrue("Spec2" in fake_session.subscriptions)
mccs = None
+ function = None
self.assertFalse("Spec2" in fake_session.subscriptions)
- def test_remote_module_with_custom_config(self):
+ def test_remote_module(self):
+ """
+ Test we can add a remote config and get the configuration.
+ Remote module specified by the spec file name.
+ """
+ self._common_remote_module_test(self._internal_remote_module)
+
+ def test_remote_module_by_name(self):
+ """
+ Test we can add a remote config and get the configuration.
+ Remote module specified by its name.
+ """
+ self._common_remote_module_by_name_test(self._internal_remote_module)
+
+ def _internal_remote_module_with_custom_config(self, function_lambda,
+ parameter,
+ fill_other_messages):
fake_session = FakeModuleCCSession()
mccs = self.create_session("spec1.spec", None, None, fake_session)
- # override the default config value for "item1". add_remote_config()
- # should incorporate the overridden value, and we should be abel to
+ function = function_lambda(mccs)
+ # override the default config value for "item1". add_remote_config[_by_name]()
+ # should incorporate the overridden value, and we should be able to
# get it via get_remote_config_value().
+ fill_other_messages(fake_session)
fake_session.group_sendmsg({'result': [0, {"item1": 10}]}, 'Spec2')
- rmodname = mccs.add_remote_config(self.spec_file("spec2.spec"))
+ rmodname = function(parameter)
value, default = mccs.get_remote_config_value(rmodname, "item1")
self.assertEqual(10, value)
self.assertEqual(False, default)
- def test_ignore_command_remote_module(self):
+ def test_remote_module_with_custom_config(self):
+ """
+ Test that the config of the module loads non-default values on
+ initialization.
+ Remote module specified by the spec file name.
+ """
+ self._common_remote_module_test(
+ self._internal_remote_module_with_custom_config)
+
+ def test_remote_module_by_name_with_custom_config(self):
+ """
+ Test that the config of the module loads non-default values on
+ initialization.
+ Remote module specified by its name.
+ """
+ self._common_remote_module_by_name_test(
+ self._internal_remote_module_with_custom_config)
+
+ def _internal_ignore_command_remote_module(self, function_lambda, param,
+ fill_other_messages):
# Create a Spec1 module and subscribe to remote config for Spec2
fake_session = FakeModuleCCSession()
mccs = self.create_session("spec1.spec", None, None, fake_session)
mccs.set_command_handler(self.my_command_handler_ok)
+ function = function_lambda(mccs)
+ fill_other_messages(fake_session)
fake_session.group_sendmsg(None, 'Spec2')
- rmodname = mccs.add_remote_config(self.spec_file("spec2.spec"))
+ rmodname = function(param)
- # remove the 'get config' from the queue
- self.assertEqual(len(fake_session.message_queue), 1)
- fake_session.get_message("ConfigManager")
+ # remove the commands from the queue
+ while len(fake_session.message_queue) > 0:
+ fake_session.get_message("ConfigManager")
# check if the command for the module itself is received
cmd = isc.config.ccsession.create_command("just_some_command", { 'foo': 'a' })
@@ -610,6 +673,174 @@ class TestModuleCCSession(unittest.TestCase):
mccs.check_command()
self.assertEqual(len(fake_session.message_queue), 0)
+ def test_ignore_command_remote_module(self):
+ """
+ Test that commands for remote modules aren't handled.
+ Remote module specified by the spec file name.
+ """
+ self._common_remote_module_test(
+ self._internal_ignore_command_remote_module)
+
+ def test_ignore_command_remote_module_by_name(self):
+ """
+ Test that commands for remote modules aren't handled.
+ Remote module specified by its name.
+ """
+ self._common_remote_module_by_name_test(
+ self._internal_ignore_command_remote_module)
+
+ def _internal_check_command_without_recvmsg_remote_module(self,
+ function_lambda,
+ param,
+ fill_other_messages):
+ fake_session = FakeModuleCCSession()
+ mccs = self.create_session("spec1.spec", None, None, fake_session)
+ mccs.set_config_handler(self.my_config_handler_ok)
+ function = function_lambda(mccs)
+ self.assertEqual(len(fake_session.message_queue), 0)
+
+ fill_other_messages(fake_session)
+ fake_session.group_sendmsg(None, 'Spec2')
+ rmodname = function(param)
+ if (len(fake_session.message_queue) == 2):
+ self.assertEqual({'command': ['get_module_spec',
+ {'module_name': 'Spec2'}]},
+ fake_session.get_message('ConfigManager', None))
+ self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
+ fake_session.get_message('ConfigManager', None))
+ self.assertEqual(len(fake_session.message_queue), 0)
+
+ cmd = isc.config.ccsession.create_command(isc.config.ccsession.COMMAND_CONFIG_UPDATE, { 'Spec2': { 'item1': 2 }})
+ env = { 'group':'Spec2', 'from':None }
+ self.assertEqual(len(fake_session.message_queue), 0)
+ mccs.check_command_without_recvmsg(cmd, env)
+ self.assertEqual(len(fake_session.message_queue), 0)
+
+ def test_check_command_without_recvmsg_remote_module(self):
+ """
+ Test updates on remote module.
+ The remote module is specified by the spec file name.
+ """
+ self._common_remote_module_test(
+ self._internal_check_command_without_recvmsg_remote_module)
+
+ def test_check_command_without_recvmsg_remote_module_by_name(self):
+ """
+ Test updates on remote module.
+ The remote module is specified by its name.
+ """
+ self._common_remote_module_by_name_test(
+ self._internal_check_command_without_recvmsg_remote_module)
+
+ def _internal_check_command_without_recvmsg_remote_module2(self,
+ function_lambda,
+ param,
+ fill_other_messages):
+ fake_session = FakeModuleCCSession()
+ mccs = self.create_session("spec1.spec", None, None, fake_session)
+ mccs.set_config_handler(self.my_config_handler_ok)
+ function = function_lambda(mccs)
+ self.assertEqual(len(fake_session.message_queue), 0)
+
+ fill_other_messages(fake_session)
+ fake_session.group_sendmsg(None, 'Spec2')
+ rmodname = function(param)
+ if (len(fake_session.message_queue) == 2):
+ self.assertEqual({'command': ['get_module_spec',
+ {'module_name': 'Spec2'}]},
+ fake_session.get_message('ConfigManager', None))
+ self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
+ fake_session.get_message('ConfigManager', None))
+ self.assertEqual(len(fake_session.message_queue), 0)
+
+ cmd = isc.config.ccsession.create_command(isc.config.ccsession.COMMAND_CONFIG_UPDATE, { 'Spec3': { 'item1': 2 }})
+ env = { 'group':'Spec3', 'from':None }
+ self.assertEqual(len(fake_session.message_queue), 0)
+ mccs.check_command_without_recvmsg(cmd, env)
+ self.assertEqual(len(fake_session.message_queue), 0)
+
+ def test_check_command_without_recvmsg_remote_module2(self):
+ """
+ Test updates on remote module.
+ The remote module is specified by the spec file name.
+ """
+ self._common_remote_module_test(
+ self._internal_check_command_without_recvmsg_remote_module2)
+
+ def test_check_command_without_recvmsg_remote_module_by_name2(self):
+ """
+ Test updates on remote module.
+ The remote module is specified by its name.
+ """
+ self._common_remote_module_by_name_test(
+ self._internal_check_command_without_recvmsg_remote_module2)
+
+ def _internal_remote_module_bad_config(self, function_lambda, parameter,
+ fill_other_messages):
+ fake_session = FakeModuleCCSession()
+ mccs = self.create_session("spec1.spec", None, None, fake_session)
+ function = function_lambda(mccs)
+ # Provide wrong config data. It should be rejected.
+ fill_other_messages(fake_session)
+ fake_session.group_sendmsg({'result': [0, {"bad_item": -1}]}, 'Spec2')
+ self.assertRaises(isc.config.ModuleCCSessionError,
+ function, parameter)
+
+ def test_remote_module_bad_config(self):
+ """
+ Test the remote module rejects bad config data.
+ """
+ self._common_remote_module_test(
+ self._internal_remote_module_bad_config)
+
+ def test_remote_module_by_name_bad_config(self):
+ """
+ Test the remote module rejects bad config data.
+ """
+ self._common_remote_module_by_name_test(
+ self._internal_remote_module_bad_config)
+
+ def _internal_remote_module_error_response(self, function_lambda,
+ parameter, fill_other_messages):
+ fake_session = FakeModuleCCSession()
+ mccs = self.create_session("spec1.spec", None, None, fake_session)
+ function = function_lambda(mccs)
+ # Provide an error answer. It should be rejected.
+ fill_other_messages(fake_session)
+ fake_session.group_sendmsg({'result': [1, "An error, and I mean it!"]},
+ 'Spec2')
+ self.assertRaises(isc.config.ModuleCCSessionError,
+ function, parameter)
+
+ def test_remote_module_error_response(self):
+ """
+ Test the remote module complains if there's an error response.
+ """
+ self._common_remote_module_test(
+ self._internal_remote_module_error_response)
+
+ def test_remote_module_by_name_error_response(self):
+ """
+ Test the remote module complains if there's an error response.
+ """
+ self._common_remote_module_by_name_test(
+ self._internal_remote_module_error_response)
+
+ def test_module_name_mismatch(self):
+ fake_session = FakeModuleCCSession()
+ mccs = self.create_session("spec1.spec", None, None, fake_session)
+ mccs.set_config_handler(self.my_config_handler_ok)
+ self._prepare_spec_message(fake_session, 'spec1.spec')
+ self.assertRaises(isc.config.ModuleCCSessionError,
+ mccs.add_remote_config_by_name, "Spec2")
+
def test_logconfig_handler(self):
# test whether default_logconfig_handler reacts nicely to
# bad data. We assume the actual logger output is tested
@@ -701,6 +932,12 @@ class TestUIModuleCCSession(unittest.TestCase):
fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
return UIModuleCCSession(fake_conn)
+ def create_uccs_listtest(self, fake_conn):
+ module_spec = isc.config.module_spec_from_file(self.spec_file("spec39.spec"))
+ fake_conn.set_get_answer('/module_spec', { module_spec.get_module_name(): module_spec.get_full_spec()})
+ fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
+ return UIModuleCCSession(fake_conn)
+
def test_init(self):
fake_conn = fakeUIConn()
fake_conn.set_get_answer('/module_spec', {})
@@ -718,6 +955,38 @@ class TestUIModuleCCSession(unittest.TestCase):
fake_conn.set_get_answer('/config_data', { 'version': 123123 })
self.assertRaises(ModuleCCSessionError, UIModuleCCSession, fake_conn)
+ def test_request_specifications(self):
+ module_spec1 = isc.config.module_spec_from_file(
+ self.spec_file("spec1.spec"))
+ module_spec_dict1 = { "module_spec": module_spec1.get_full_spec() }
+ module_spec2 = isc.config.module_spec_from_file(
+ self.spec_file("spec2.spec"))
+ module_spec_dict2 = { "module_spec": module_spec2.get_full_spec() }
+
+ fake_conn = fakeUIConn()
+ # Set the first one in the answer
+ fake_conn.set_get_answer('/module_spec', module_spec_dict1)
+ fake_conn.set_get_answer('/config_data',
+ { 'version': BIND10_CONFIG_DATA_VERSION })
+ uccs = UIModuleCCSession(fake_conn)
+
+ # We should now have the first one, but not the second.
+ self.assertTrue("Spec1" in uccs._specifications)
+ self.assertEqual(module_spec1.get_full_spec(),
+ uccs._specifications["Spec1"].get_full_spec())
+ self.assertFalse("Spec2" in uccs._specifications)
+
+ # Now set an answer where only the second one is present
+ fake_conn.set_get_answer('/module_spec', module_spec_dict2)
+
+ uccs.request_specifications()
+
+ # Now Spec1 should have been removed, and spec2 should be there
+ self.assertFalse("Spec1" in uccs._specifications)
+ self.assertTrue("Spec2" in uccs._specifications)
+ self.assertEqual(module_spec2.get_full_spec(),
+ uccs._specifications["Spec2"].get_full_spec())
+
def test_add_remove_value(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs2(fake_conn)
@@ -751,11 +1020,30 @@ class TestUIModuleCCSession(unittest.TestCase):
self.assertRaises(isc.cc.data.DataTypeError,
uccs.remove_value, "Spec2/item5", None)
+ def test_add_dup_value(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_listtest(fake_conn)
+
+ uccs.add_value("Spec39/list")
+ self.assertRaises(isc.cc.data.DataAlreadyPresentError, uccs.add_value,
+ "Spec39/list")
+
def test_add_remove_value_named_set(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs_named_set(fake_conn)
value, status = uccs.get_value("/Spec32/named_set_item")
self.assertEqual({'a': 1, 'b': 2}, value)
+
+ # make sure that removing from default actually removes it
+ uccs.remove_value("/Spec32/named_set_item", "a")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'b': 2}, value)
+ self.assertEqual(uccs.LOCAL, status)
+
+ # ok, put it back now
+ uccs.add_value("/Spec32/named_set_item", "a")
+ uccs.set_value("/Spec32/named_set_item/a", 1)
+
uccs.add_value("/Spec32/named_set_item", "foo")
value, status = uccs.get_value("/Spec32/named_set_item")
self.assertEqual({'a': 1, 'b': 2, 'foo': 3}, value)
@@ -765,13 +1053,51 @@ class TestUIModuleCCSession(unittest.TestCase):
value, status = uccs.get_value("/Spec32/named_set_item")
self.assertEqual({'b': 2}, value)
+ uccs.set_value("/Spec32/named_set_item/c", 5)
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({"b": 2, "c": 5}, value)
+
self.assertRaises(isc.cc.data.DataNotFoundError,
uccs.set_value,
- "/Spec32/named_set_item/no_such_item",
+ "/Spec32/named_set_item/no_such_item/a",
4)
self.assertRaises(isc.cc.data.DataNotFoundError,
uccs.remove_value, "/Spec32/named_set_item",
"no_such_item")
+ self.assertRaises(isc.cc.data.DataAlreadyPresentError,
+ uccs.add_value, "/Spec32/named_set_item", "c")
+
+ def test_set_value_named_set(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_named_set(fake_conn)
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({}, value)
+ self.assertEqual(status, uccs.DEFAULT)
+
+ # Try setting a value that is optional but has no default
+ uccs.add_value("/Spec32/named_set_item2", "new1")
+ uccs.set_value("/Spec32/named_set_item2/new1/first", 3)
+ # Different method to add a new element
+ uccs.set_value("/Spec32/named_set_item2/new2", { "second": 4 })
+
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({ "new1": {"first": 3 }, "new2": {"second": 4}},
+ value)
+ self.assertEqual(status, uccs.LOCAL)
+
+ uccs.set_value("/Spec32/named_set_item2/new1/second", "foo")
+
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({ "new1": {"first": 3, "second": "foo" },
+ "new2": {"second": 4}},
+ value)
+ self.assertEqual(status, uccs.LOCAL)
+
+ # make sure using a bad name still fails
+ self.assertRaises(isc.cc.data.DataNotFoundError, uccs.set_value,
+ "/Spec32/named_set_item2/doesnotexist/first", 3)
+
+
def test_commit(self):
fake_conn = fakeUIConn()
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index eacc425..891a7d7 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -37,7 +37,7 @@ class TestConfigManagerData(unittest.TestCase):
It shouldn't append the data path to it.
"""
abs_path = self.data_path + os.sep + "b10-config-imaginary.db"
- data = ConfigManagerData(os.getcwd(), abs_path)
+ data = ConfigManagerData(self.data_path, abs_path)
self.assertEqual(abs_path, data.db_filename)
self.assertEqual(self.data_path, data.data_path)
@@ -74,6 +74,60 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(self.config_manager_data, new_config)
os.remove(output_file_name)
+ def check_existence(self, files, should_exist=[], should_not_exist=[]):
+ """Helper function for test_rename_config_file.
+ Arguments:
+ files: array of file names to check.
+ should_exist: array of indices, the files in 'files' with these
+ indices should exist.
+ should_not_exist: array of indices, the files in 'files' with
+ these indices should not exist."""
+ for n in should_exist:
+ self.assertTrue(os.path.exists(files[n]))
+ for n in should_not_exist:
+ self.assertFalse(os.path.exists(files[n]))
+
+ def test_rename_config_file(self):
+ # test file names, put in array for easy cleanup
+ filenames = [ "b10-config-rename-test",
+ "b10-config-rename-test.bak",
+ "b10-config-rename-test.bak.1",
+ "b10-config-rename-test.bak.2" ]
+
+ for filename in filenames:
+ if os.path.exists(filename):
+ os.remove(filename)
+
+ # The original does not exist, so the new one should not be created
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [], [0, 1, 2, 3])
+
+ # now create a file to rename, and call rename again
+ self.config_manager_data.write_to_file(filenames[0])
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [1], [0, 2, 3])
+
+ # If backup already exists, give it a new name automatically
+ self.config_manager_data.write_to_file(filenames[0])
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [1, 2], [0, 3])
+
+ # If backup already exists, give it a new name automatically with
+ # increasing postfix
+ self.config_manager_data.write_to_file(filenames[0])
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [1, 2, 3], [0])
+
+ # Test with explicit renamed file argument
+ self.config_manager_data.rename_config_file(filenames[1],
+ filenames[0])
+ self.check_existence(filenames, [0, 2, 3], [1])
+
+ # clean up again to be nice
+ for filename in filenames:
+ if os.path.exists(filename):
+ os.remove(filename)
+
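The assertions above pin down the backup behaviour of ConfigManagerData.rename_config_file(): the current file is moved to a ".bak" name and, if that already exists, to ".bak.1", ".bak.2", and so on. A minimal standalone sketch of that rotation idea (rotate_backup is a hypothetical helper name, not the BIND 10 implementation):

    import os

    def rotate_backup(path):
        # Mirror the behaviour checked above: do nothing if the file does
        # not exist, otherwise rename it to the first free name among
        # path.bak, path.bak.1, path.bak.2, ...
        if not os.path.exists(path):
            return None
        backup = path + ".bak"
        counter = 1
        while os.path.exists(backup):
            backup = "%s.bak.%d" % (path, counter)
            counter += 1
        os.rename(path, backup)
        return backup
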
def test_equality(self):
# tests the __eq__ function. Equality is only defined
# by equality of the .data element. If data_path or db_filename
@@ -88,7 +142,7 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(cfd1, cfd2)
cfd2.data['test'] = { 'a': [ 1, 2, 3]}
self.assertNotEqual(cfd1, cfd2)
-
+
class TestConfigManager(unittest.TestCase):
@@ -128,7 +182,7 @@ class TestConfigManager(unittest.TestCase):
msg = self.fake_session.get_message("Boss", None)
self.assert_(msg)
# this one is actually wrong, but 'current status quo'
- self.assertEqual(msg, {"running": "configmanager"})
+ self.assertEqual(msg, {"running": "ConfigManager"})
def test_set_module_spec(self):
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
@@ -198,8 +252,8 @@ class TestConfigManager(unittest.TestCase):
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
config_spec = self.cm.get_config_spec('Spec2')
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
-
-
+
+
def test_get_commands_spec(self):
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec, {})
@@ -240,9 +294,6 @@ class TestConfigManager(unittest.TestCase):
def test_read_config(self):
self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
- self.cm.read_config()
- # due to what get written, the value here is what the last set_config command in test_handle_msg does
- self.assertEqual(self.cm.config.data, {'TestModule': {'test': 125}, 'version': config_data.BIND10_CONFIG_DATA_VERSION})
self.cm.data_path = "/no_such_path"
self.cm.read_config()
self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
@@ -250,120 +301,200 @@ class TestConfigManager(unittest.TestCase):
def test_write_config(self):
# tested in ConfigManagerData tests
pass
-
+
def _handle_msg_helper(self, msg, expected_answer):
answer = self.cm.handle_msg(msg)
self.assertEqual(expected_answer, answer)
- def test_handle_msg(self):
- self._handle_msg_helper({}, { 'result': [ 1, 'Unknown message format: {}']})
- self._handle_msg_helper("", { 'result': [ 1, 'Unknown message format: ']})
- self._handle_msg_helper({ "command": [ "badcommand" ] }, { 'result': [ 1, "Unknown command: badcommand"]})
- self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, {} ]})
- self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, {} ]})
- self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, {} ]})
- self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "Spec2" } ] }, { 'result': [ 0, {} ]})
- #self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "nosuchmodule" } ] },
- # {'result': [1, 'No specification for module nosuchmodule']})
+ def test_handle_msg_basic_commands(self):
+ # Some basic commands, where not much interaction happens, just
+ # check the result
+ self._handle_msg_helper({},
+ { 'result': [ 1, 'Unknown message format: {}']})
+ self._handle_msg_helper("",
+ { 'result': [ 1, 'Unknown message format: ']})
+ self._handle_msg_helper({ "command": [ "badcommand" ] },
+ { 'result': [ 1, "Unknown command: badcommand"]})
+ self._handle_msg_helper({ "command": [ "get_commands_spec" ] },
+ { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] },
+ { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_module_spec" ] },
+ { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_module_spec",
+ { "module_name": "Spec2" } ] },
+ { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec", 1 ] },
- {'result': [1, 'Bad get_module_spec command, argument not a dict']})
+ {'result': [1, 'Bad get_module_spec command, '+
+ 'argument not a dict']})
self._handle_msg_helper({ "command": [ "get_module_spec", { } ] },
- {'result': [1, 'Bad module_name in get_module_spec command']})
- self._handle_msg_helper({ "command": [ "get_config" ] }, { 'result': [ 0, { 'version': config_data.BIND10_CONFIG_DATA_VERSION } ]})
- self._handle_msg_helper({ "command": [ "get_config", { "module_name": "nosuchmodule" } ] },
- {'result': [0, { 'version': config_data.BIND10_CONFIG_DATA_VERSION }]})
+ {'result': [1, 'Bad module_name in '+
+ 'get_module_spec command']})
+ self._handle_msg_helper({ "command": [ "get_config" ] },
+ { 'result': [ 0, { 'version':
+ config_data.BIND10_CONFIG_DATA_VERSION }]})
+ self._handle_msg_helper({ "command": [ "get_config",
+ { "module_name": "nosuchmodule" } ] },
+ {'result': [0, { 'version':
+ config_data.BIND10_CONFIG_DATA_VERSION }]})
self._handle_msg_helper({ "command": [ "get_config", 1 ] },
- {'result': [1, 'Bad get_config command, argument not a dict']})
+ {'result': [1, 'Bad get_config command, '+
+ 'argument not a dict']})
self._handle_msg_helper({ "command": [ "get_config", { } ] },
- {'result': [1, 'Bad module_name in get_config command']})
+ {'result': [1, 'Bad module_name in '+
+ 'get_config command']})
self._handle_msg_helper({ "command": [ "set_config" ] },
{'result': [1, 'Wrong number of arguments']})
self._handle_msg_helper({ "command": [ "set_config", [{}]] },
{'result': [0]})
+
self.assertEqual(len(self.fake_session.message_queue), 0)
- # the targets of some of these tests expect specific answers, put
- # those in our fake msgq first.
- my_ok_answer = { 'result': [ 0 ] }
+ def test_handle_msg_module_and_stats_commands(self):
+ self._handle_msg_helper({ "command":
+ ["module_spec", self.spec.get_full_spec()]
+ },
+ {'result': [0]})
+ # There should be a message on the queue about the 'new' Spec2 module
+ # from ConfigManager to Cmdctl, containing its name and full
+ # specification
+ self.assertEqual(ccsession.create_command(
+ ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
+ [ self.spec.get_module_name(),
+ self.spec.get_full_spec()]),
+ self.fake_session.get_message("Cmdctl", None))
+
+ self._handle_msg_helper({ "command": [ "module_spec", { 'foo': 1 } ] },
+ {'result': [1, 'Error in data definition: no '+
+ 'module_name in module_spec']})
+
+ self._handle_msg_helper({ "command": [ "get_module_spec" ] },
+ { 'result': [ 0, { self.spec.get_module_name():
+ self.spec.get_full_spec() } ]})
+ self._handle_msg_helper({ "command": [ "get_module_spec",
+ { "module_name" : "Spec2" } ] },
+ { 'result': [ 0, self.spec.get_full_spec() ] })
+ self._handle_msg_helper({ "command": [ "get_commands_spec" ] },
+ { 'result': [ 0, { self.spec.get_module_name():
+ self.spec.get_commands_spec()}]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] },
+ { 'result': [ 0, { self.spec.get_module_name():
+ self.spec.get_statistics_spec()}]})
+ def __test_handle_msg_update_config_helper(self, new_config):
+ # Helper function for the common pattern in
+ # test_handle_msg_update_config; send 'set_config', check for the
+ # update message, and check that the config has indeed been updated
+
+ my_ok_answer = { 'result': [ 0 ] }
# Send the 'ok' that cfgmgr expects back to the fake queue first
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- # then send the command
- self._handle_msg_helper({ "command": [ "set_config", [self.name, { "test": 123 }] ] },
+
+ config_version = config_data.BIND10_CONFIG_DATA_VERSION
+ self._handle_msg_helper({ "command": [ "set_config",
+ [ { "version": config_version,
+ self.name: new_config } ] ] },
my_ok_answer)
- # The cfgmgr should have eaten the ok message, and sent out an update again
+
+ # The cfgmgr should have eaten the ok message, and sent out an update
+ # message
self.assertEqual(len(self.fake_session.message_queue), 1)
- self.assertEqual({'command': [ 'config_update', {'test': 123}]},
+ self.assertEqual({'command': [ 'config_update', new_config]},
self.fake_session.get_message(self.name, None))
+
+ # Config should have been updated
+ self.assertEqual(self.cm.config.data, {self.name: new_config,
+ 'version': config_version})
+
# and the queue should now be empty again
self.assertEqual(len(self.fake_session.message_queue), 0)
- # below are variations of the theme above
- self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self._handle_msg_helper({ "command": [ "set_config", [self.name, { "test": 124 }] ] },
- my_ok_answer)
- self.assertEqual(len(self.fake_session.message_queue), 1)
- self.assertEqual({'command': [ 'config_update', {'test': 124}]},
- self.fake_session.get_message(self.name, None))
- self.assertEqual(len(self.fake_session.message_queue), 0)
+ def test_handle_msg_update_config(self):
+ # Update the configuration and check the results a few times;
+ # the helper queues a fresh 'ok' answer for each call
+ self.__test_handle_msg_update_config_helper({ "test": 123 })
+ self.__test_handle_msg_update_config_helper({ "test": 124 })
- # This is the last 'succes' one, the value set here is what test_read_config expects
- self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self._handle_msg_helper({ "command": [ "set_config", [ { self.name: { "test": 125 } }] ] },
- my_ok_answer )
- self.assertEqual(len(self.fake_session.message_queue), 1)
- self.assertEqual({'command': [ 'config_update', {'test': 125}]},
- self.fake_session.get_message(self.name, None))
- self.assertEqual(len(self.fake_session.message_queue), 0)
+ self.__test_handle_msg_update_config_helper({ "test": 125 })
+
+ self.__test_handle_msg_update_config_helper({ "test": 126 })
- my_bad_answer = { 'result': [1, "bad_answer"] }
+ # Now send an error result (i.e. config not accepted)
+ my_bad_answer = { 'result': [1, "bad config"] }
self.fake_session.group_sendmsg(my_bad_answer, "ConfigManager")
- self._handle_msg_helper({ "command": [ "set_config", [ self.name, { "test": 125 }] ] },
+ self._handle_msg_helper({ "command": [ "set_config",
+ [self.name, { "test": 127 }] ] },
my_bad_answer )
self.assertEqual(len(self.fake_session.message_queue), 1)
- self.assertEqual({'command': [ 'config_update', {'test': 125}]},
+ self.assertEqual({'command': [ 'config_update', {'test': 127}]},
self.fake_session.get_message(self.name, None))
+ # Config should not be updated due to the error
+ self.cm.read_config()
+ self.assertEqual(self.cm.config.data, { self.name: {'test': 126},
+ 'version': config_data.BIND10_CONFIG_DATA_VERSION})
+
self.assertEqual(len(self.fake_session.message_queue), 0)
self.fake_session.group_sendmsg(None, 'ConfigManager')
self._handle_msg_helper({ "command": [ "set_config", [ ] ] },
{'result': [1, 'Wrong number of arguments']} )
- self._handle_msg_helper({ "command": [ "set_config", [ self.name, { "test": 125 }] ] },
- { 'result': [1, 'No answer message from TestModule']} )
-
- #self.assertEqual(len(self.fake_session.message_queue), 1)
- #self.assertEqual({'config_update': {'test': 124}},
- # self.fake_session.get_message(self.name, None))
- #self.assertEqual({'version': 1, 'TestModule': {'test': 124}}, self.cm.config.data)
- #
- self._handle_msg_helper({ "command":
- ["module_spec", self.spec.get_full_spec()]
+ self._handle_msg_helper({ "command": [ "set_config",
+ [ self.name, { "test": 128 }]]},
+ { 'result': [1, 'No answer message '+
+ 'from TestModule']} )
+
+ # This command should leave a message to the TestModule to update its
+ # configuration (since the TestModule did not eat it)
+ self.assertEqual(len(self.fake_session.message_queue), 1)
+ self.assertEqual(
+ ccsession.create_command(ccsession.COMMAND_CONFIG_UPDATE,
+ { "test": 128 }),
+ self.fake_session.get_message("TestModule", None))
+
+ # Make sure queue is empty now
+ self.assertEqual(len(self.fake_session.message_queue), 0)
+
+ # Shutdown should result in 'ok' answer
+ self._handle_msg_helper({ "command":
+ ["shutdown"]
},
{'result': [0]})
- self._handle_msg_helper({ "command": [ "module_spec", { 'foo': 1 } ] },
- {'result': [1, 'Error in data definition: no module_name in module_spec']})
- self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_full_spec() } ]})
- self._handle_msg_helper({ "command": [ "get_module_spec",
- { "module_name" : "Spec2" } ] },
- { 'result': [ 0, self.spec.get_full_spec() ] })
- self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_commands_spec() } ]})
- self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_statistics_spec() } ]})
- # re-add this once we have new way to propagate spec changes (1 instead of the current 2 messages)
- #self.assertEqual(len(self.fake_session.message_queue), 2)
- # the name here is actually wrong (and hardcoded), but needed in the current version
- # TODO: fix that
- #self.assertEqual({'specification_update': [ self.name, self.spec ] },
- # self.fake_session.get_message("Cmdctl", None))
- #self.assertEqual({'commands_update': [ self.name, self.commands ] },
- # self.fake_session.get_message("Cmdctl", None))
-
- self._handle_msg_helper({ "command":
- ["shutdown"]
+
+ def test_stopping_message(self):
+ # Update the system by announcing this module
+ self._handle_msg_helper({ "command":
+ ["module_spec", self.spec.get_full_spec()]
},
{'result': [0]})
+ # This causes an update to be sent from the ConfigManager to the Cmdctl
+ # channel, containing the new module's name and full specification
+ self.assertEqual(ccsession.create_command(
+ ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
+ [ self.spec.get_module_name(),
+ self.spec.get_full_spec()]),
+ self.fake_session.get_message("Cmdctl", None))
+
+ # A stopping message should get no response, but should cause another
+ # message to be sent, if it is a known module
+ self._handle_msg_helper({ "command": [ "stopping",
+ { "module_name": "Spec2"}] },
+ None)
+ self.assertEqual(len(self.fake_session.message_queue), 1)
+ self.assertEqual({'command': [ 'module_specification_update',
+ ['Spec2', None] ] },
+ self.fake_session.get_message("Cmdctl", None))
+
+ # but if the 'stopping' module is either unknown or not running,
+ # no followup message should be sent
+ self._handle_msg_helper({ "command":
+ [ "stopping",
+ { "module_name": "NoSuchModule" } ] },
+ None)
+ self.assertEqual(len(self.fake_session.message_queue), 0)
+
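For reference, the message shapes exercised by this test look roughly like the following (a sketch of the dictionaries passed over the cc channel, taken from the assertions above, not an exact wire format):

    # Sent when a known module (here Spec2) announces that it is stopping:
    stopping_msg = {"command": ["stopping", {"module_name": "Spec2"}]}

    # What the ConfigManager then sends to the Cmdctl group; the None
    # specification tells Cmdctl that the module's specification is gone:
    spec_update = {"command": ["module_specification_update",
                               ["Spec2", None]]}
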
def test_set_config_virtual(self):
"""Test that if the module is virtual, we don't send it over the
message bus, but call the checking function.
@@ -381,9 +512,9 @@ class TestConfigManager(unittest.TestCase):
self.cm.set_virtual_module(self.spec, check_test)
# The fake session will throw now if it tries to read a response.
# Handy, we don't need to find a complicated way to check for it.
- result = self.cm._handle_set_config_module(self.spec.
- get_module_name(),
- {'item1': value})
+ result = self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG,
+ [self.spec.get_module_name(), { "item1": value }]))
# Check the correct result is passed and our function was called
# With correct data
self.assertEqual(self.called_with['item1'], value)
@@ -415,19 +546,22 @@ class TestConfigManager(unittest.TestCase):
self.assertEqual({"version": 2}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value1": 123 }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value1": 123 }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value1": 123 }
}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value1": 124 }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value1": 124 }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value1": 124 }
}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value2": True }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value2": True }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value1": 124,
"value2": True
@@ -435,7 +569,8 @@ class TestConfigManager(unittest.TestCase):
}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value3": [ 1, 2, 3 ] }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value3": [ 1, 2, 3 ] }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value1": 124,
"value2": True,
@@ -444,7 +579,8 @@ class TestConfigManager(unittest.TestCase):
}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value2": False }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value2": False }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value1": 124,
"value2": False,
@@ -453,7 +589,8 @@ class TestConfigManager(unittest.TestCase):
}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value1": None }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value1": None }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value2": False,
"value3": [ 1, 2, 3 ]
@@ -461,7 +598,8 @@ class TestConfigManager(unittest.TestCase):
}, self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
- self.cm._handle_set_config_all({"test": { "value3": [ 1 ] }})
+ self.cm.handle_msg(ccsession.create_command(
+ ccsession.COMMAND_SET_CONFIG, ["test", { "value3": [ 1 ] }]))
self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION,
"test": { "value2": False,
"value3": [ 1 ]
@@ -472,14 +610,20 @@ class TestConfigManager(unittest.TestCase):
def test_run(self):
self.fake_session.group_sendmsg({ "command": [ "get_commands_spec" ] }, "ConfigManager")
self.fake_session.group_sendmsg({ "command": [ "get_statistics_spec" ] }, "ConfigManager")
+ self.fake_session.group_sendmsg({ "command": [ "stopping", { "module_name": "FooModule" } ] }, "ConfigManager")
self.fake_session.group_sendmsg({ "command": [ "shutdown" ] }, "ConfigManager")
+ self.assertEqual(len(self.fake_session.message_queue), 4)
self.cm.run()
- pass
+ # All commands should have been read out by run()
+ # Three of the commands should have been responded to, so the queue
+ # should now contain three answers
+ self.assertEqual(len(self.fake_session.message_queue), 3)
if __name__ == '__main__':
if not 'CONFIG_TESTDATA_PATH' in os.environ or not 'CONFIG_WR_TESTDATA_PATH' in os.environ:
print("You need to set the environment variable CONFIG_TESTDATA_PATH and CONFIG_WR_TESTDATA_PATH to point to the directory containing the test data files")
exit(1)
+ isc.log.init("unittests")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
-
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index 0dd441d..221ffa6 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -157,6 +157,7 @@ class TestConfigData(unittest.TestCase):
self.assertRaises(isc.cc.data.DataTypeError, convert_type, spec_part, [ "a", "b" ])
self.assertRaises(isc.cc.data.DataTypeError, convert_type, spec_part, [ "1", "b" ])
self.assertRaises(isc.cc.data.DataTypeError, convert_type, spec_part, { "a": 1 })
+ self.assertRaises(isc.cc.data.DataTypeError, convert_type, spec_part, "\"{ \"a\": 1 }\"")
spec_part = find_spec_part(config_spec, "value7")
self.assertEqual(['1', '2'], convert_type(spec_part, '1, 2'))
@@ -185,6 +186,47 @@ class TestConfigData(unittest.TestCase):
spec_part = find_spec_part(config_spec, "item6/value1")
self.assertEqual({'item_name': 'value1', 'item_type': 'string', 'item_optional': True, 'item_default': 'default'}, spec_part)
+ # make sure the returned data is a copy
+ spec_part['item_default'] = 'foo'
+ self.assertNotEqual(spec_part, find_spec_part(config_spec, "item6/value1"))
+
+ def test_find_spec_part_lists(self):
+ # A few specific tests for list data
+ module_spec = isc.config.module_spec_from_file(self.data_path +
+ os.sep +
+ "spec31.spec")
+ config_spec = module_spec.get_config_spec()
+
+ expected_spec_part = {'item_name': 'number',
+ 'item_type': 'integer',
+ 'item_default': 1,
+ 'item_optional': False}
+
+ # First a check for a correct fetch
+ spec_part = find_spec_part(config_spec,
+ "/first_list_items[0]/second_list_items[1]/"
+ "map_element/list1[1]/list2[1]")
+ self.assertEqual(expected_spec_part, spec_part)
+
+ # Leaving out an index should fail by default
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ find_spec_part, config_spec,
+ "/first_list_items[0]/second_list_items/"
+ "map_element/list1[1]/list2[1]")
+
+ # But not for the last element
+ spec_part = find_spec_part(config_spec,
+ "/first_list_items[0]/second_list_items[1]/"
+ "map_element/list1[1]/list2")
+ self.assertEqual(expected_spec_part, spec_part)
+
+ # And also not if strict_identifier is false (third argument)
+ spec_part = find_spec_part(config_spec,
+ "/first_list_items[0]/second_list_items/"
+ "map_element/list1[1]/list2[1]", False)
+ self.assertEqual(expected_spec_part, spec_part)
+
+
def test_spec_name_list(self):
name_list = spec_name_list(self.cd.get_module_spec().get_config_spec())
self.assertEqual(['item1', 'item2', 'item3', 'item4', 'item5', 'item6'], name_list)
@@ -312,6 +354,16 @@ class TestMultiConfigData(unittest.TestCase):
self.mcd.remove_specification(module_spec.get_module_name())
self.assertFalse(self.mcd.have_specification(module_spec.get_module_name()))
+ def test_clear_specifications(self):
+ self.assertEqual(0, len(self.mcd._specifications))
+ module_spec = isc.config.module_spec_from_file(self.data_path +
+ os.sep +
+ "spec1.spec")
+ self.mcd.set_specification(module_spec)
+ self.assertEqual(1, len(self.mcd._specifications))
+ self.mcd.clear_specifications()
+ self.assertEqual(0, len(self.mcd._specifications))
+
def test_get_module_spec(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
self.mcd.set_specification(module_spec)
@@ -372,7 +424,14 @@ class TestMultiConfigData(unittest.TestCase):
self.mcd.set_value("Spec2/item1", 2)
local_changes = self.mcd.get_local_changes()
self.assertEqual({"Spec2": { "item1": 2}}, local_changes)
-
+
+ def test_set_local_changes(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.mcd.set_specification(module_spec)
+ self.assertEqual({}, self.mcd.get_local_changes())
+ new_local_changes = {"Spec2": { "item1": 2}}
+ self.mcd.set_local_changes(new_local_changes)
+ self.assertEqual(new_local_changes, self.mcd.get_local_changes())
def test_clear_local_changes(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
@@ -473,15 +532,25 @@ class TestMultiConfigData(unittest.TestCase):
self.assertEqual(MultiConfigData.DEFAULT, status)
-
def test_get_value_maps(self):
maps = self.mcd.get_value_maps()
self.assertEqual([], maps)
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
self.mcd.set_specification(module_spec)
+
+ expected = [{'default': False,
+ 'type': 'module',
+ 'name': 'Spec1',
+ 'value': None,
+ 'modified': False}]
+
maps = self.mcd.get_value_maps()
- self.assertEqual([{'default': False, 'type': 'module', 'name': 'Spec1', 'value': None, 'modified': False}], maps)
+ self.assertEqual(expected, maps)
+
+ maps = self.mcd.get_value_maps("/")
+ self.assertEqual(expected, maps)
+
maps = self.mcd.get_value_maps('Spec2')
self.assertEqual([], maps)
maps = self.mcd.get_value_maps('Spec1')
@@ -527,8 +596,10 @@ class TestMultiConfigData(unittest.TestCase):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec24.spec")
self.mcd.set_specification(module_spec)
- self.assertRaises(isc.cc.data.DataNotFoundError,
- self.mcd.get_value_maps, "/Spec24/item", 4)
+ # an optional, unset list item should be returned as an empty list
+ maps = self.mcd.get_value_maps("/Spec24/item", 4)
+ self.assertEqual([{'default': False, 'type': 'list', 'name': 'Spec24/item', 'value': [], 'modified': False}], maps)
+
self.mcd._set_current_config({ "Spec24": { "item": [] } })
maps = self.mcd.get_value_maps("/Spec24/item")
self.assertEqual([{'default': False, 'modified': False, 'name': 'Spec24/item', 'type': 'list', 'value': []}], maps)
@@ -556,6 +627,20 @@ class TestMultiConfigData(unittest.TestCase):
maps = self.mcd.get_value_maps("/Spec22/value9")
self.assertEqual(expected, maps)
+ # A slash at the end should not produce different output
+ maps = self.mcd.get_value_maps("/Spec22/value9/")
+ self.assertEqual(expected, maps)
+
+ # A slash at the end should not produce different output with
+ # indices too
+ expected2 = [{'default': True,
+ 'type': 'integer',
+ 'name': 'Spec22/value5[1]',
+ 'value': 'b',
+ 'modified': False}]
+ maps = self.mcd.get_value_maps("/Spec22/value5[1]/")
+ self.assertEqual(expected2, maps)
+
def test_get_value_maps_named_set(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
self.mcd.set_specification(module_spec)
@@ -595,7 +680,38 @@ class TestMultiConfigData(unittest.TestCase):
self.assertEqual(MultiConfigData.LOCAL, status)
self.assertRaises(isc.cc.data.DataTypeError, self.mcd.set_value, "Spec2/item5[a]", "asdf")
-
+
+
+ def test_unset(self):
+ """
+ Test the unset command works.
+ """
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.mcd.set_specification(module_spec)
+ self.mcd.set_specification(module_spec)
+ value, status = self.mcd.get_value("Spec2/item1")
+ # This is the default first
+ self.assertEqual(1, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+ # Unsetting a default item does nothing.
+ self.mcd.unset("Spec2/item1")
+ value, status = self.mcd.get_value("Spec2/item1")
+ # This should be the default
+ self.assertEqual(1, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+ # Set it to something else
+ self.mcd.set_value("Spec2/item1", 42)
+ value, status = self.mcd.get_value("Spec2/item1")
+ self.assertEqual(42, value)
+ self.assertEqual(MultiConfigData.LOCAL, status)
+ # Try to unset it
+ self.mcd.unset("Spec2/item1")
+ value, status = self.mcd.get_value("Spec2/item1")
+ # This should be the default
+ self.assertEqual(1, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+ # Unsetting a nonexistent item should raise.
+ self.assertRaises(isc.cc.data.DataNotFoundError, self.mcd.unset, "Spec2/doesnotexist")
def test_get_config_item_list(self):
config_items = self.mcd.get_config_item_list()
@@ -617,6 +733,12 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list("Spec2", True)
self.assertEqual(['Spec2/item1', 'Spec2/item2', 'Spec2/item3', 'Spec2/item4', 'Spec2/item5', 'Spec2/item6/value1', 'Spec2/item6/value2'], config_items)
+ def test_is_named_set(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ spec_part = self.mcd.find_spec_part("Spec32/named_set_item")
+ self.assertTrue(spec_part_is_named_set(spec_part))
+
def test_get_config_item_list_named_set(self):
config_items = self.mcd.get_config_item_list()
self.assertEqual([], config_items)
@@ -627,7 +749,7 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list(None, False)
self.assertEqual(['Spec32'], config_items)
config_items = self.mcd.get_config_item_list(None, True)
- self.assertEqual(['Spec32/named_set_item'], config_items)
+ self.assertEqual(['Spec32/named_set_item', 'Spec32/named_set_item2'], config_items)
self.mcd.set_value('Spec32/named_set_item', { "aaaa": 4, "aabb": 5, "bbbb": 6})
config_items = self.mcd.get_config_item_list("/Spec32/named_set_item", True)
self.assertEqual(['Spec32/named_set_item/aaaa',
@@ -635,6 +757,20 @@ class TestMultiConfigData(unittest.TestCase):
'Spec32/named_set_item/bbbb',
], config_items)
+ def test_set_named_set_nonlocal(self):
+ # Test whether a default named set is copied to local if a subitem
+ # is changed, and that other items in the set do not get lost
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + 'spec32.spec')
+ self.mcd.set_specification(module_spec)
+ value, status = self.mcd.get_value('Spec32/named_set_item')
+ self.assertEqual({'a': 1, 'b': 2}, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+
+ self.mcd.set_value('Spec32/named_set_item/b', 3)
+ value, status = self.mcd.get_value('Spec32/named_set_item')
+ self.assertEqual({'a': 1, 'b': 3}, value)
+ self.assertEqual(MultiConfigData.LOCAL, status)
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index fc53d23..bb2bcda 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -46,8 +46,8 @@ class TestModuleSpec(unittest.TestCase):
self.spec1(dd)
def test_open_file_obj(self):
- file1 = open(self.spec_file("spec1.spec"))
- dd = isc.config.module_spec_from_file(file1)
+ with open(self.spec_file("spec1.spec")) as file1:
+ dd = isc.config.module_spec_from_file(file1)
self.spec1(dd)
def test_open_bad_file_obj(self):
@@ -89,8 +89,8 @@ class TestModuleSpec(unittest.TestCase):
def validate_data(self, specfile_name, datafile_name):
dd = self.read_spec_file(specfile_name);
- data_file = open(self.spec_file(datafile_name))
- data_str = data_file.read()
+ with open(self.spec_file(datafile_name)) as data_file:
+ data_str = data_file.read()
data = isc.cc.data.parse_value_str(data_str)
return dd.validate_config(True, data)
@@ -109,8 +109,8 @@ class TestModuleSpec(unittest.TestCase):
def validate_command_params(self, specfile_name, datafile_name, cmd_name):
dd = self.read_spec_file(specfile_name);
- data_file = open(self.spec_file(datafile_name))
- data_str = data_file.read()
+ with open(self.spec_file(datafile_name)) as data_file:
+ data_str = data_file.read()
params = isc.cc.data.parse_value_str(data_str)
return dd.validate_command(cmd_name, params)
@@ -131,8 +131,8 @@ class TestModuleSpec(unittest.TestCase):
def test_statistics_validation(self):
def _validate_stat(specfile_name, datafile_name):
dd = self.read_spec_file(specfile_name);
- data_file = open(self.spec_file(datafile_name))
- data_str = data_file.read()
+ with open(self.spec_file(datafile_name)) as data_file:
+ data_str = data_file.read()
data = isc.cc.data.parse_value_str(data_str)
return dd.validate_statistics(True, data, [])
self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None));
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 60282d9..1d862db 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -8,6 +8,7 @@ python_PYTHON = __init__.py master.py sqlite3_ds.py
# new data
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += $(SQLITE_CFLAGS)
python_LTLIBRARIES = datasrc.la
@@ -16,12 +17,14 @@ datasrc_la_SOURCES += client_python.cc client_python.h
datasrc_la_SOURCES += iterator_python.cc iterator_python.h
datasrc_la_SOURCES += finder_python.cc finder_python.h
datasrc_la_SOURCES += updater_python.cc updater_python.h
+datasrc_la_SOURCES += journal_reader_python.cc journal_reader_python.h
datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
-datasrc_la_LDFLAGS += -module
+datasrc_la_LDFLAGS += -module -avoid-version
datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
datasrc_la_LIBADD += $(PYTHON_LIB)
@@ -29,6 +32,7 @@ EXTRA_DIST = client_inc.cc
EXTRA_DIST += finder_inc.cc
EXTRA_DIST += iterator_inc.cc
EXTRA_DIST += updater_inc.cc
+EXTRA_DIST += journal_reader_inc.cc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index b81f48d..e0c0f06 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -89,7 +89,7 @@ None\n\
";
const char* const DataSourceClient_getIterator_doc = "\
-get_iterator(name) -> ZoneIterator\n\
+get_iterator(name, separate_rrs=False) -> ZoneIterator\n\
\n\
Returns an iterator to the given zone.\n\
\n\
@@ -111,12 +111,18 @@ anything else.\n\
Parameters:\n\
isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
nearest match as find_zone.\n\
+ separate_rrs If true, the iterator will return each RR as a\n\
+ new RRset object. If false, the iterator will\n\
+ combine consecutive RRs with the same name and type\n\
+ into one RRset. The capitalization of the RRset will\n\
+ be that of the first RR read, and TTLs will be\n\
+ adjusted to the lowest one found.\n\
\n\
Return Value(s): Pointer to the iterator.\n\
";
const char* const DataSourceClient_getUpdater_doc = "\
-get_updater(name, replace) -> ZoneUpdater\n\
+get_updater(name, replace, journaling=False) -> ZoneUpdater\n\
\n\
Return an updater to make updates to a specific zone.\n\
\n\
@@ -157,6 +163,22 @@ A data source can be \"read only\" or can prohibit partial updates. In\n\
such cases this method will result in an isc.datasrc.NotImplemented exception\n\
unconditionally or when replace is false).\n\
\n\
+If journaling is True, the data source should store a journal of\n\
+changes. These can be used later on by, for example, IXFR-out.\n\
+However, the parameter is only a hint: the data source might be unable\n\
+to store the changes, in which case they are silently discarded, or it\n\
+might store them regardless (for example, a git-based data source\n\
+would store the journal implicitly). When journaling is True, the\n\
+subsequent update is required to be formatted as an IXFR transfer (an\n\
+SOA to be removed, a group of RRs to be removed, an SOA to be added, a\n\
+group of RRs to be added, possibly repeated), although the updater is\n\
+not required to check this. When it is False, the updater must not\n\
+impose such a requirement and must accept any order of changes.\n\
+\n\
+Erasing the whole zone (replace being True) and saving a journal at\n\
+the same time is not supported; in such a situation isc.datasrc.Error\n\
+is raised.\n\
+\n\
Exceptions:\n\
isc.datasrc. NotImplemented The underlying data source does not support\n\
updates.\n\
@@ -165,6 +187,63 @@ Exceptions:\n\
Parameters:\n\
name The zone name to be updated\n\
replace Whether to delete existing RRs before making updates\n\
+ journaling The zone updater should store a journal of the changes.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+// pointer -> (removed)
+// Null -> None
+// exception types
+const char* const DataSourceClient_getJournalReader_doc = "\
+get_journal_reader(zone, begin_serial, end_serial) ->\n\
+ (int, ZoneJournalReader)\n\
+\n\
+Return a journal reader to retrieve differences of a zone.\n\
+\n\
+A derived version of this method creates a concrete ZoneJournalReader\n\
+object specific to the underlying data source for the specified name\n\
+of zone and differences between the versions specified by the\n\
+beginning and ending serials of the corresponding SOA RRs. The RR\n\
+class of the zone is the one that the client is expected to handle\n\
+(see the detailed description of this class).\n\
+\n\
+Note that the SOA serials are compared by the semantics of the serial\n\
+number arithmetic. So, for example, begin_serial can be larger than\n\
+end_serial as bare unsigned integers. The underlying data source\n\
+implementation is assumed to keep track of sufficient history to\n\
+identify the corresponding difference (if it exists) between the\n\
+specified versions.\n\
+\n\
+This method returns the result as a pair of a result code and a\n\
+ZoneJournalReader object. On success, the result code is\n\
+SUCCESS and the object must not be None; otherwise the result code is\n\
+something other than SUCCESS and the object must be None.\n\
+\n\
+If the specified zone is not found in the data source, the result code\n\
+is NO_SUCH_ZONE. Otherwise, if specified range of difference for the\n\
+zone is not found in the data source, the result code is\n\
+NO_SUCH_VERSION.\n\
+\n\
+Handling differences is an optional feature of a data source. If the\n\
+underlying data source does not support difference handling, this\n\
+method for that type of data source can throw an exception of class\n\
+isc.datasrc.NotImplemented.\n\
\n\
+Exceptions:\n\
+ isc.datasrc.NotImplemented The data source does not support differences.\n\
+ isc.datasrc.Error Other operational errors at the data source level.\n\
+ SystemError An unexpected error in the backend C++ code. Either a rare\n\
+ system error such as short memory or an implementation bug.\n\
+\n\
+Parameters:\n\
+ zone The name of the zone for which the difference should be\n\
+ retrieved.\n\
+ begin_serial The SOA serial of the beginning version of the\n\
+ differences.\n\
+ end_serial The SOA serial of the ending version of the differences.\n\
+\n\
+Return Value(s): A pair of result code and a ZoneJournalReader object\n\
+(which can be None)\n\
";
} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index caebd25..bdf84a3 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -38,6 +38,7 @@
#include "finder_python.h"
#include "iterator_python.h"
#include "updater_python.h"
+#include "journal_reader_python.h"
#include "client_inc.cc"
using namespace std;
@@ -83,11 +84,27 @@ DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
PyObject*
DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
- PyObject *name_obj;
- if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ PyObject* name_obj;
+ PyObject* separate_rrs_obj = NULL;
+ if (PyArg_ParseTuple(args, "O!|O", &name_type, &name_obj,
+ &separate_rrs_obj)) {
try {
+ bool separate_rrs = false;
+ if (separate_rrs_obj != NULL) {
+ // store the result in a local var so we can explicitly check for
+ // -1 error return value
+ int separate_rrs_true = PyObject_IsTrue(separate_rrs_obj);
+ if (separate_rrs_true == 1) {
+ separate_rrs = true;
+ } else if (separate_rrs_true == -1) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Error getting value of separate_rrs");
+ return (NULL);
+ }
+ }
return (createZoneIteratorObject(
- self->cppobj->getInstance().getIterator(PyName_ToName(name_obj)),
+ self->cppobj->getInstance().getIterator(PyName_ToName(name_obj),
+ separate_rrs),
po_self));
} catch (const isc::NotImplemented& ne) {
PyErr_SetString(getDataSourceException("NotImplemented"),
@@ -113,14 +130,17 @@ PyObject*
DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
PyObject *name_obj;
- PyObject *replace_obj;
- if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
- PyBool_Check(replace_obj)) {
- bool replace = (replace_obj != Py_False);
+ PyObject *replace_obj = NULL;
+ PyObject *journaling_obj = Py_False;
+ if (PyArg_ParseTuple(args, "O!O|O", &name_type, &name_obj,
+ &replace_obj, &journaling_obj) &&
+ PyBool_Check(replace_obj) && PyBool_Check(journaling_obj)) {
+ const bool replace = (replace_obj != Py_False);
+ const bool journaling = (journaling_obj == Py_True);
try {
ZoneUpdaterPtr updater =
self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
- replace);
+ replace, journaling);
if (!updater) {
return (Py_None);
}
@@ -141,10 +161,56 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
return (NULL);
}
} else {
+ // PyBool_Check doesn't set the error, so we have to set it ourselves.
+ if (replace_obj != NULL && !PyBool_Check(replace_obj)) {
+ PyErr_SetString(PyExc_TypeError, "'replace' for "
+ "DataSourceClient.get_updater must be boolean");
+ }
+ if (!PyBool_Check(journaling_obj)) {
+ PyErr_SetString(PyExc_TypeError, "'journaling' for "
+ "DataSourceClient.get_updater must be boolean");
+ }
return (NULL);
}
}
+PyObject*
+DataSourceClient_getJournalReader(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ unsigned long begin_obj, end_obj;
+
+ if (PyArg_ParseTuple(args, "O!kk", &name_type, &name_obj,
+ &begin_obj, &end_obj)) {
+ try {
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ self->cppobj->getInstance().getJournalReader(
+ PyName_ToName(name_obj), static_cast<uint32_t>(begin_obj),
+ static_cast<uint32_t>(end_obj));
+ PyObject* po_reader;
+ if (result.first == ZoneJournalReader::SUCCESS) {
+ po_reader = createZoneJournalReaderObject(result.second,
+ po_self);
+ } else {
+ po_reader = Py_None;
+ Py_INCREF(po_reader); // this will soon be released
+ }
+ PyObjectContainer container(po_reader);
+ return (Py_BuildValue("(iO)", result.first, container.get()));
+ } catch (const isc::NotImplemented& ex) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ex.what());
+ } catch (const DataSourceError& ex) {
+ PyErr_SetString(getDataSourceException("Error"), ex.what());
+ } catch (const std::exception& ex) {
+ PyErr_SetString(PyExc_SystemError, ex.what());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected exception");
+ }
+ }
+ return (NULL);
+}
+
// This list contains the actual set of functions we have in
// python. Each entry has
// 1. Python method name
@@ -152,18 +218,21 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
// 3. Argument type
// 4. Documentation
PyMethodDef DataSourceClient_methods[] = {
- { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
- METH_VARARGS, DataSourceClient_findZone_doc },
+ { "find_zone", DataSourceClient_findZone, METH_VARARGS,
+ DataSourceClient_findZone_doc },
{ "get_iterator",
- reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator, METH_VARARGS,
DataSourceClient_getIterator_doc },
- { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ { "get_updater", DataSourceClient_getUpdater,
METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { "get_journal_reader", DataSourceClient_getJournalReader,
+ METH_VARARGS, DataSourceClient_getJournalReader_doc },
{ NULL, NULL, 0, NULL }
};
int
-DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+DataSourceClient_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_DataSourceClient* self = static_cast<s_DataSourceClient*>(po_self);
char* ds_type_str;
char* ds_config_str;
try {
@@ -208,7 +277,8 @@ DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
}
void
-DataSourceClient_destroy(s_DataSourceClient* const self) {
+DataSourceClient_destroy(PyObject* po_self) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
delete self->cppobj;
self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
@@ -227,7 +297,7 @@ PyTypeObject datasourceclient_type = {
"datasrc.DataSourceClient",
sizeof(s_DataSourceClient), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ DataSourceClient_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -258,7 +328,7 @@ PyTypeObject datasourceclient_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ DataSourceClient_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 6ab29d8..f31d10a 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -27,6 +27,7 @@
#include "finder_python.h"
#include "iterator_python.h"
#include "updater_python.h"
+#include "journal_reader_python.h"
#include <util/python/pycppwrapper_util.h>
#include <dns/python/pydnspp_common.h>
@@ -128,12 +129,6 @@ initModulePart_ZoneFinder(PyObject* mod) {
Py_BuildValue("I", ZoneFinder::CNAME));
installClassVariable(zonefinder_type, "DNAME",
Py_BuildValue("I", ZoneFinder::DNAME));
- installClassVariable(zonefinder_type, "WILDCARD",
- Py_BuildValue("I", ZoneFinder::WILDCARD));
- installClassVariable(zonefinder_type, "WILDCARD_NXRRSET",
- Py_BuildValue("I", ZoneFinder::WILDCARD_NXRRSET));
- installClassVariable(zonefinder_type, "WILDCARD_CNAME",
- Py_BuildValue("I", ZoneFinder::WILDCARD_CNAME));
installClassVariable(zonefinder_type, "FIND_DEFAULT",
Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
@@ -143,6 +138,15 @@ initModulePart_ZoneFinder(PyObject* mod) {
Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
installClassVariable(zonefinder_type, "NO_WILDCARD",
Py_BuildValue("I", ZoneFinder::NO_WILDCARD));
+
+ installClassVariable(zonefinder_type, "RESULT_WILDCARD",
+ Py_BuildValue("I", ZoneFinder::RESULT_WILDCARD));
+ installClassVariable(zonefinder_type, "RESULT_NSEC_SIGNED",
+ Py_BuildValue("I",
+ ZoneFinder::RESULT_NSEC_SIGNED));
+ installClassVariable(zonefinder_type, "RESULT_NSEC3_SIGNED",
+ Py_BuildValue("I",
+ ZoneFinder::RESULT_NSEC3_SIGNED));
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in ZoneFinder initialization: " +
@@ -192,8 +196,44 @@ initModulePart_ZoneUpdater(PyObject* mod) {
return (true);
}
+bool
+initModulePart_ZoneJournalReader(PyObject* mod) {
+ if (PyType_Ready(&journal_reader_type) < 0) {
+ return (false);
+ }
+ void* p = &journal_reader_type;
+ if (PyModule_AddObject(mod, "ZoneJournalReader",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&journal_reader_type);
+
+ try {
+ installClassVariable(journal_reader_type, "SUCCESS",
+ Py_BuildValue("I", ZoneJournalReader::SUCCESS));
+ installClassVariable(journal_reader_type, "NO_SUCH_ZONE",
+ Py_BuildValue("I",
+ ZoneJournalReader::NO_SUCH_ZONE));
+ installClassVariable(journal_reader_type, "NO_SUCH_VERSION",
+ Py_BuildValue("I",
+ ZoneJournalReader::NO_SUCH_VERSION));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in ZoneJournalReader initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in ZoneJournalReader initialization");
+ return (false);
+ }
+
+ return (true);
+}
PyObject* po_DataSourceError;
+PyObject* po_OutOfZone;
PyObject* po_NotImplemented;
PyModuleDef iscDataSrc = {
@@ -239,10 +279,17 @@ PyInit_datasrc(void) {
return (NULL);
}
+ if (!initModulePart_ZoneJournalReader(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
try {
po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
NULL);
PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+ po_OutOfZone = PyErr_NewException("isc.datasrc.OutOfZone", NULL, NULL);
+ PyObjectContainer(po_OutOfZone).installToModule(mod, "OutOfZone");
po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
NULL, NULL);
PyObjectContainer(po_NotImplemented).installToModule(mod,
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
index 4a00e78..467c6ad 100644
--- a/src/lib/python/isc/datasrc/finder_inc.cc
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -47,7 +47,7 @@ Return the RR class of the zone.\n\
// - NULL->None
// - exceptions
const char* const ZoneFinder_find_doc = "\
-find(name, type, target=None, options=FIND_DEFAULT) -> (integer, RRset)\n\
+find(name, type, options=FIND_DEFAULT) -> (integer, RRset, integer)\n\
\n\
Search the zone for a given pair of domain name and RR type.\n\
\n\
@@ -58,12 +58,10 @@ answer for the search key. Specifically,\n\
\n\
- If the search name belongs under a zone cut, it returns the code of\n\
DELEGATION and the NS RRset at the zone cut.\n\
-- If there is no matching name, it returns the code of NXDOMAIN, and,\n\
- if DNSSEC is requested, the NSEC RRset that proves the non-\n\
- existence.\n\
+- If there is no matching name, it returns the code of NXDOMAIN.\n\
- If there is a matching name but no RRset of the search type, it\n\
- returns the code of NXRRSET, and, if DNSSEC is required, the NSEC\n\
- RRset for that name.\n\
+ returns the code of NXRRSET. This includes the case where the\n\
+ search name matches an empty node of the zone.\n\
- If there is a CNAME RR of the searched name but there is no RR of\n\
the searched type of the name (so this type is different from\n\
CNAME), it returns the code of CNAME and that CNAME RR. Note that if\n\
@@ -71,13 +69,16 @@ answer for the search key. Specifically,\n\
and the code of SUCCESS will be returned.\n\
- If the search name matches a delegation point of DNAME, it returns\n\
the code of DNAME and that DNAME RR.\n\
-- If the target isn't None, all RRsets under the domain are inserted\n\
- there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
- instead of normall processing. This is intended to handle ANY query.\n\
\n\
-Note: This behavior is controversial as we discussed in\n\
-https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html We\n\
-should revisit the interface before we heavily rely on it.\n\
+No RRset will be returned in the NXDOMAIN and NXRRSET cases (the\n\
+second element of the tuple will be None), unless DNSSEC data are\n\
+required. See below for the cases with DNSSEC.\n\
+\n\
+The third element of the returned tuple provides supplemental\n\
+information about the search result in the form of a bitmask (called\n\
+\"flags\"). Such information may be useful for the caller if the\n\
+caller wants to collect additional DNSSEC proofs based on the search\n\
+result.\n\
\n\
The options parameter specifies customized behavior of the search.\n\
Their semantics is as follows (they are or bit-field):\n\
@@ -98,6 +99,94 @@ Their semantics is as follows (they are or bit-field):\n\
of the non existence of any matching wildcard or non existence of an\n\
exact match when a wildcard match is found.\n\
\n\
+The name is expected to belong to the zone, that is, it should be\n\
+equal to or a subdomain of the zone origin. Otherwise an\n\
+isc.datasrc.OutOfZone exception is raised.\n\
+\n\
+Note: for this reason it is probably better to raise an exception\n\
+than to return NXDOMAIN. This point should be revisited in a future\n\
+version. In any case, applications shouldn't call this method for an\n\
+out-of-zone name.\n\
+\n\
+DNSSEC considerations: The result when DNSSEC data are required can be\n\
+very complicated, especially if it involves negative result or\n\
+wildcard match. Specifically, if an application calls this method for\n\
+DNS query processing with DNSSEC data, and if the search result code\n\
+is either NXDOMAIN or NXRRSET, and/or the RESULT_WILDCARD\n\
+flag is set in the returned flags value,\n\
+then the application will need to find additional NSEC or NSEC3\n\
+records for supplemental proofs. This method helps the application for\n\
+such post search processing.\n\
+\n\
+First, it tells the application whether the zone is signed with NSEC\n\
+or NSEC3 via the RESULT_NSEC_SIGNED and RESULT_NSEC3_SIGNED flags\n\
+in the returned flags value. Any sanely signed zone\n\
+should be signed with either (and only one) of these two types of RRs;\n\
+however, the application should expect that the zone could be broken\n\
+and that neither flag is set. This method should still ensure that\n\
+the two flags are never both set at the same time.\n\
+\n\
+In case it's signed with NSEC3, there is no further information\n\
+returned from this method.\n\
+\n\
+In case it's signed with NSEC, this method will possibly return a\n\
+related NSEC RRset in the second element of the tuple. What kind of\n\
+NSEC is returned depends on the result code (NXDOMAIN or NXRRSET) and\n\
+on whether it's a wildcard match:\n\
+\n\
+- In case of NXDOMAIN, the returned NSEC covers the queried domain\n\
+ that proves that the query name does not exist in the zone. Note\n\
+ that this does not necessarily prove it doesn't even match a\n\
+ wildcard (even if the result of NXDOMAIN can only happen when\n\
+ there's no matching wildcard either). It is caller's responsibility\n\
+ to provide a proof that there is no matching wildcard if that proof\n\
+ is necessary.\n\
+- In case of NXRRSET, we need to consider the following cases\n\
+ referring to Section 3.1.3 of RFC4035:\n\
+\n\
+1. (Normal) no data: there is a matching non-wildcard name with a\n\
+ different RR type. This is the \"No Data\" case of the RFC.\n\
+2. (Normal) empty non terminal: there is no matching (exact or\n\
+ wildcard) name, but there is a subdomain of the query name that\n\
+ has an RR. This is one case of \"Name Error\" of the RFC.\n\
+3. Wildcard empty non terminal: similar to 2, but the empty name is\n\
+ a wildcard, and matches the query name by wildcard expansion. This\n\
+ is a special case of \"Name Error\" of the RFC.\n\
+4. Wildcard no data: there is no exact match name, but there is a\n\
+ wildcard name that matches the query name with a different type of RR.\n\
+ This is the \"Wildcard No Data\" case of the RFC.\n\
+\n\
+In case 1, find() returns NSEC of the matching name.\n\
+\n\
+In case 2, find() will return NSEC for the interval where the empty\n\
+nonterminal lives. The end of the interval is the subdomain causing\n\
+existence of the empty nonterminal (if there's sub.x.example.com, and\n\
+no record in x.example.com, then x.example.com exists implicitly: it\n\
+is the empty nonterminal and sub.x.example.com is the subdomain\n\
+causing it). Note that this NSEC proves not only the existence of the\n\
+empty nonterminal name but also the non-existence of a possibly\n\
+matching wildcard name, because there can be no better wildcard match\n\
+than the exact-matching empty name.\n\
+\n\
+In case 3, find() will return NSEC for the interval where the wildcard\n\
+empty nonterminal lives. Cases 2 and 3 are especially complicated and\n\
+confusing. See the examples below.\n\
+\n\
+In case 4, find() will return NSEC of the matching wildcard name.\n\
+\n\
+Examples: if zone \"example.com\" has the following record:\n\
+\n\
+a.example.com. NSEC a.b.example.com.\n\
+\n\
+a call to find() for \"b.example.com.\" with the FIND_DNSSEC option\n\
+will result in NXRRSET, and this NSEC will be returned.\n\
+Likewise, if zone \"example.org\" has the following record,\n\
+\n\
+a.example.org. NSEC x.*.b.example.org.\n\
+\n\
+a call to find() for \"y.b.example.org\" with FIND_DNSSEC will\n\
+result in NXRRSET and this NSEC; RESULT_WILDCARD bit is set in the\n\
+returned flags.\n\
\n\
This method raises an isc.datasrc.Error exception if there is an\n\
internal error in the datasource.\n\
@@ -105,29 +194,35 @@ internal error in the datasource.\n\
Parameters:\n\
name The domain name to be searched for.\n\
type The RR type to be searched for.\n\
- target If target is not None, insert all RRs under the domain\n\
- into it.\n\
options The search options.\n\
\n\
-Return Value(s): A tuple of a result code (integer) and an RRset object\n\
-enclosing the search result (see above).\n\
+Return Value(s): A tuple of a result code (integer), an RRset object\n\
+and flags bitmask (integer).\n\
";
-const char* const ZoneFinder_find_previous_name_doc = "\
-find_previous_name(isc.dns.Name) -> isc.dns.Name\n\
+const char* const ZoneFinder_findAll_doc = "\
+find_all(isc.dns.Name, options=FIND_DEFAULT) ->\n\
+ (integer, RRset, integer) | (integer, [RRset], integer)\
\n\
-Gets the previous name in the DNSSEC order. This can be used\n\
-to find the correct NSEC records for proving nonexistence\n\
-of domains.\n\
+Finds all RRsets in the given name.\n\
\n\
-This method does not include under-zone-cut data (glue data).\n\
+This method works almost exactly the same way as find(). The only\n\
+difference is that when the lookup is successful (i.e. the code is\n\
+SUCCESS), all the RRsets residing at the named node are returned in\n\
+the second element of the returned tuple. All the other (unsuccessful)\n\
+cases are handled the same, including returning delegations,\n\
+NSEC/NSEC3 availability and NSEC proofs, wildcard information, etc.\n\
+The options parameter works the same way and the same exception\n\
+restrictions apply.\n\
\n\
-Raises isc.datasrc.NotImplemented in case the data source backend\n\
-doesn't support DNSSEC or there is no previous in the zone (NSEC\n\
-records might be missing in the DB, the queried name is less or\n\
-equal to the apex).\n\
+Parameters:\n\
+ name The domain name to be searched for.\n\
+ options The search options.\n\
\n\
-Raises isc.datasrc.Error for low-level or internal datasource errors\n\
-(like broken connection to database, wrong data living there).\n\
+Return Value(s): A tuple of a result code (integer), either an\n\
+RRset object or a list of RRsets, and flags (integer).\n\
+In the second element a single RRset is returned for cases where the\n\
+result is some kind of delegation, CNAME or similar; in other cases\n\
+a list of RRsets is returned, containing all the results.\n\
";
} // unnamed namespace
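Continuing the illustrative sketch given earlier (reusing its finder and
ZoneFinder names; the queried name is only meaningful for the test zone),
find_all() differs only in the shape of the second tuple element:

    code, data, flags = finder.find_all(isc.dns.Name("mix.example.com"))
    if code == ZoneFinder.SUCCESS:
        # data is a list of RRsets: everything at the node
        for rrset in data:
            print(rrset.to_text())
    elif data is not None:
        # a single RRset, e.g. the NS RRset of a DELEGATION or a CNAME
        print(data.to_text())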
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index cb02724..1b0e3d1 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -45,6 +45,23 @@ using namespace isc::dns::python;
using namespace isc::datasrc;
using namespace isc::datasrc::python;
+namespace {
+ZoneFinder::FindResultFlags
+getFindResultFlags(const ZoneFinder::Context& context) {
+ ZoneFinder::FindResultFlags result_flags = ZoneFinder::RESULT_DEFAULT;
+ if (context.isWildcard()) {
+ result_flags = result_flags | ZoneFinder::RESULT_WILDCARD;
+ }
+ if (context.isNSECSigned()) {
+ result_flags = result_flags | ZoneFinder::RESULT_NSEC_SIGNED;
+ }
+ if (context.isNSEC3Signed()) {
+ result_flags = result_flags | ZoneFinder::RESULT_NSEC3_SIGNED;
+ }
+ return (result_flags);
+}
+}
+
namespace isc_datasrc_internal {
// This is the shared code for the find() call in the finder and the updater
// Is is intentionally not available through any header, nor at our standard
@@ -53,32 +70,93 @@ namespace isc_datasrc_internal {
PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
if (finder == NULL) {
PyErr_SetString(getDataSourceException("Error"),
- "Internal error in find() wrapper; finder object NULL");
+ "Internal error in find() wrapper; "
+ "finder object NULL");
return (NULL);
}
- PyObject *name;
- PyObject *rrtype;
- PyObject *target;
- int options_int;
- if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ PyObject* name;
+ PyObject* rrtype;
+ unsigned int options_int = ZoneFinder::FIND_DEFAULT;
+ if (PyArg_ParseTuple(args, "O!O!|I", &name_type, &name,
&rrtype_type, &rrtype,
- &target, &options_int)) {
+ &options_int)) {
try {
ZoneFinder::FindOptions options =
static_cast<ZoneFinder::FindOptions>(options_int);
- ZoneFinder::FindResult find_result(
- finder->find(PyName_ToName(name),
- PyRRType_ToRRType(rrtype),
- NULL,
- options
- ));
- ZoneFinder::Result r = find_result.code;
- isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ ConstZoneFinderContextPtr find_ctx(
+ finder->find(PyName_ToName(name), PyRRType_ToRRType(rrtype),
+ options));
+ const ZoneFinder::Result r = find_ctx->code;
+ isc::dns::ConstRRsetPtr rrsp = find_ctx->rrset;
+ ZoneFinder::FindResultFlags result_flags =
+ getFindResultFlags(*find_ctx);
if (rrsp) {
// Use N instead of O so the refcount isn't increased twice
- return (Py_BuildValue("IN", r, createRRsetObject(*rrsp)));
+ return (Py_BuildValue("INI", r, createRRsetObject(*rrsp),
+ result_flags));
+ } else {
+ return (Py_BuildValue("IOI", r, Py_None, result_flags));
+ }
+ } catch (const OutOfZone& ooz) {
+ PyErr_SetString(getDataSourceException("OutOfZone"), ooz.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject* ZoneFinder_helper_all(ZoneFinder* finder, PyObject* args) {
+ if (finder == NULL) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Internal error in find_all() wrapper; "
+ "finder object NULL");
+ return (NULL);
+ }
+ PyObject* name;
+ unsigned int options_int = ZoneFinder::FIND_DEFAULT;
+ if (PyArg_ParseTuple(args, "O!|I", &name_type, &name, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ std::vector<isc::dns::ConstRRsetPtr> target;
+ ConstZoneFinderContextPtr find_ctx(
+ finder->findAll(PyName_ToName(name), target, options));
+ const ZoneFinder::Result r = find_ctx->code;
+ isc::dns::ConstRRsetPtr rrsp = find_ctx->rrset;
+ ZoneFinder::FindResultFlags result_flags =
+ getFindResultFlags(*find_ctx);
+ if (r == ZoneFinder::SUCCESS) {
+ // Copy all the RRsets to the result list
+ PyObjectContainer list_container(PyList_New(target.size()));
+ for (size_t i(0); i < target.size(); ++i) {
+ PyList_SET_ITEM(list_container.get(), i,
+ createRRsetObject(*target[i]));
+ }
+ // Construct the result with the list. The Py_BuildValue
+ // increases the refcount and the container decreases it
+ // later. This way, it feels safer in case the build function
+ // would fail.
+ return (Py_BuildValue("IOI", r, list_container.get(),
+ result_flags));
} else {
- return (Py_BuildValue("IO", r, Py_None));
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return (Py_BuildValue("INI", r, createRRsetObject(*rrsp),
+ result_flags));
+ } else {
+ return (Py_BuildValue("IOI", r, Py_None, result_flags));
+ }
}
} catch (const DataSourceError& dse) {
PyErr_SetString(getDataSourceException("Error"), dse.what());
@@ -94,7 +172,6 @@ PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
} else {
return (NULL);
}
- return Py_BuildValue("I", 1);
}
} // end namespace internal
@@ -118,7 +195,7 @@ typedef CPPPyObjectContainer<s_ZoneFinder, ZoneFinder> ZoneFinderContainer;
// General creation and destruction
int
-ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
+ZoneFinder_init(PyObject*, PyObject*, PyObject*) {
// can't be called directly
PyErr_SetString(PyExc_TypeError,
"ZoneFinder cannot be constructed directly");
@@ -127,7 +204,8 @@ ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
}
void
-ZoneFinder_destroy(s_ZoneFinder* const self) {
+ZoneFinder_destroy(PyObject* po_self) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
// cppobj is a shared ptr, but to make sure things are not destroyed in
// the wrong order, we reset it here.
self->cppobj.reset();
@@ -170,28 +248,10 @@ ZoneFinder_find(PyObject* po_self, PyObject* args) {
}
PyObject*
-ZoneFinder_findPreviousName(PyObject* po_self, PyObject* args) {
+ZoneFinder_find_all(PyObject* po_self, PyObject* args) {
s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
- PyObject* name_obj;
- if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
- try {
- return (createNameObject(
- self->cppobj->findPreviousName(PyName_ToName(name_obj))));
- } catch (const isc::NotImplemented& nie) {
- PyErr_SetString(getDataSourceException("NotImplemented"),
- nie.what());
- return (NULL);
- } catch (const std::exception& exc) {
- PyErr_SetString(getDataSourceException("Error"), exc.what());
- return (NULL);
- } catch (...) {
- PyErr_SetString(getDataSourceException("Error"),
- "Unexpected exception");
- return (NULL);
- }
- } else {
- return (NULL);
- }
+ return (isc_datasrc_internal::ZoneFinder_helper_all(self->cppobj.get(),
+ args));
}
// This list contains the actual set of functions we have in
@@ -205,8 +265,7 @@ PyMethodDef ZoneFinder_methods[] = {
ZoneFinder_getOrigin_doc },
{ "get_class", ZoneFinder_getClass, METH_NOARGS, ZoneFinder_getClass_doc },
{ "find", ZoneFinder_find, METH_VARARGS, ZoneFinder_find_doc },
- { "find_previous_name", ZoneFinder_findPreviousName, METH_VARARGS,
- ZoneFinder_find_previous_name_doc },
+ { "find_all", ZoneFinder_find_all, METH_VARARGS, ZoneFinder_findAll_doc },
{ NULL, NULL, 0, NULL }
};
@@ -221,7 +280,7 @@ PyTypeObject zonefinder_type = {
"datasrc.ZoneFinder",
sizeof(s_ZoneFinder), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(ZoneFinder_destroy),// tp_dealloc
+ ZoneFinder_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -252,7 +311,7 @@ PyTypeObject zonefinder_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(ZoneFinder_init),// tp_init
+ ZoneFinder_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
@@ -268,16 +327,16 @@ PyTypeObject zonefinder_type = {
PyObject*
createZoneFinderObject(isc::datasrc::ZoneFinderPtr source, PyObject* base_obj) {
- s_ZoneFinder* py_zi = static_cast<s_ZoneFinder*>(
+ s_ZoneFinder* py_zf = static_cast<s_ZoneFinder*>(
zonefinder_type.tp_alloc(&zonefinder_type, 0));
- if (py_zi != NULL) {
- py_zi->cppobj = source;
- py_zi->base_obj = base_obj;
- }
- if (base_obj != NULL) {
- Py_INCREF(base_obj);
+ if (py_zf != NULL) {
+ py_zf->cppobj = source;
+ py_zf->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
}
- return (py_zi);
+ return (py_zf);
}
} // namespace python
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
index b1d9d25..087200a 100644
--- a/src/lib/python/isc/datasrc/iterator_inc.cc
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -31,4 +31,37 @@ the end of the zone.\n\
Raises an isc.datasrc.Error exception if it is called again after returning\n\
None\n\
";
+
+// Modifications:
+// - ConstRRset->RRset
+// - NULL->None
+// - removed notes about derived classes (which don't apply to Python)
+const char* const ZoneIterator_getSOA_doc = "\
+get_soa() -> isc.dns.RRset\n\
+\n\
+Return the SOA record of the zone in the iterator context.\n\
+\n\
+This method returns the zone's SOA record (if any, and a valid zone\n\
+should have it) in the form of an RRset object. This SOA is identical\n\
+to that (again, if any) contained in the sequence of RRsets returned\n\
+by the iterator. In that sense this method is redundant, but is\n\
+provided as a convenient utility for the application of the iterator;\n\
+the application may need to know the SOA serial or the SOA RR itself\n\
+for the purpose of protocol handling or skipping the expensive\n\
+iteration processing.\n\
+\n\
+If the zone doesn't have an SOA (which is broken, but some data source\n\
+may allow that situation), this method returns None. Also, in the\n\
+normal and valid case, the SOA should have exactly one RDATA, but this\n\
+API does not guarantee it as some data source may accept such an\n\
+abnormal condition. It's up to the caller whether to check the number\n\
+of RDATA and how to react to the unexpected case.\n\
+\n\
+Exceptions:\n\
+ None\n\
+\n\
+Return Value(s): An SOA RRset object that would be\n\
+returned from the iteration. It will be None if the zone doesn't have\n\
+an SOA.\n\
+";
} // unnamed namespace
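A small usage sketch for the get_soa() call documented above (reusing the
client from the earlier sketch; extracting the serial by splitting the RDATA
text is purely illustrative and assumes the usual SOA text format):

    iterator = dsc.get_iterator(isc.dns.Name("example.com"))
    soa = iterator.get_soa()
    if soa is not None and len(soa.get_rdata()) == 1:
        # SOA text: MNAME RNAME SERIAL REFRESH RETRY EXPIRE MINIMUM
        serial = soa.get_rdata()[0].to_text().split()[2]
        print("zone serial:", serial)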
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
index c52ab4a..9e6900c 100644
--- a/src/lib/python/isc/datasrc/iterator_python.cc
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -132,10 +132,35 @@ ZoneIterator_next(PyObject* self) {
}
}
+PyObject*
+ZoneIterator_getSOA(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getSOA();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+ // isc::Unexpected is thrown when we call getNextRRset() when we are
+ // already done iterating ('iterating past end')
+ // We could also simply return None again
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
PyMethodDef ZoneIterator_methods[] = {
- { "get_next_rrset",
- reinterpret_cast<PyCFunction>(ZoneIterator_getNextRRset), METH_NOARGS,
+ { "get_next_rrset", ZoneIterator_getNextRRset, METH_NOARGS,
ZoneIterator_getNextRRset_doc },
+ { "get_soa", ZoneIterator_getSOA, METH_NOARGS, ZoneIterator_getSOA_doc },
{ NULL, NULL, 0, NULL }
};
@@ -204,9 +229,9 @@ createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
if (py_zi != NULL) {
py_zi->cppobj = source;
py_zi->base_obj = base_obj;
- }
- if (base_obj != NULL) {
- Py_INCREF(base_obj);
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
}
return (py_zi);
}
diff --git a/src/lib/python/isc/datasrc/journal_reader_inc.cc b/src/lib/python/isc/datasrc/journal_reader_inc.cc
new file mode 100644
index 0000000..35ba70e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_inc.cc
@@ -0,0 +1,80 @@
+namespace {
+const char* const ZoneJournalReader_doc = "\
+The base class for retrieving differences between two versions of a\n\
+zone.\n\
+\n\
+On construction, each derived class object will internally set up\n\
+retrieving sequences of differences between two specific version of a\n\
+specific zone managed in a particular data source. So the constructor\n\
+of a derived class would normally take parameters to identify the zone\n\
+and the two versions for which the differences should be retrieved.\n\
+See DataSourceClient.get_journal_reader for more concrete details used\n\
+in this API.\n\
+\n\
+Once constructed, an object of this class will act like an iterator\n\
+over the sequences. Every time the get_next_diff() method is called it\n\
+returns one element of the differences in the form of an RRset until\n\
+it reaches the end of the entire set of sequences.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+// ConstRRsetPtr -> RRset
+// Null -> None
+// InvalidOperation -> ValueError
+const char* const ZoneJournalReader_getNextDiff_doc = "\
+get_next_diff() -> isc.dns.RRset\n\
+\n\
+Return the next difference RR of difference sequences.\n\
+\n\
+In this API, the difference between two versions of a zone is\n\
+conceptually represented as IXFR-style difference sequences: Each\n\
+difference sequence is a sequence of RRs: an older version of SOA (to\n\
+be deleted), zero or more other deleted RRs, the post-transaction SOA\n\
+(to be added), and zero or more other added RRs. (Note, however, that\n\
+the underlying data source implementation may or may not represent the\n\
+difference in a straightforward realization of this concept. The mapping\n\
+between the conceptual difference and the actual implementation is\n\
+hidden in each derived class).\n\
+\n\
+This method provides an application with a higher level interface to\n\
+retrieve the difference along with the conceptual model: the\n\
+ZoneJournalReader object iterates over the entire sequences from the\n\
+beginning SOA (which is to be deleted) to one of the added RR of with\n\
+the ending SOA, and each call to this method returns one RR in the\n\
+form of an RRset that contains exactly one RDATA in the order of the\n\
+sequences.\n\
+\n\
+Note that the ordering of the sequences specifies the semantics of\n\
+each difference: add or delete. For example, the first RR is to be\n\
+deleted, and the last RR is to be added. So the return value of this\n\
+method does not explicitly indicate whether the RR is to be added or\n\
+deleted.\n\
+\n\
+This method ensures the returned RRset represents an RR, that is, it\n\
+contains exactly one RDATA. However, it does not necessarily ensure\n\
+that the resulting sequences are in valid IXFR-style form. For\n\
+example, the first RR is supposed to be an SOA, and that should\n\
+normally be the case, but this interface does not necessarily require\n\
+the derived class implementation to ensure this. Normally the\n\
+differences are expected to be stored using this API (via a\n\
+ZoneUpdater object), and as long as that is the case and the\n\
+underlying implementation follows the requirements of the API, the\n\
+result of this method should form valid IXFR-style sequences. So this\n\
+API does not mandate such an almost redundant check as part of the\n\
+interface. If the application needs to be 100% sure, it must check\n\
+the resulting sequence itself.\n\
+\n\
+Once the object reaches the end of the sequences, this method returns\n\
+None. Any subsequent call will result in an exception of class\n\
+ValueError.\n\
+\n\
+Exceptions:\n\
+ ValueError The method is called beyond the end of the\n\
+ difference sequences.\n\
+ isc.datasrc.Error Underlying data is broken and the RR cannot be\n\
+ created or other low level data source error.\n\
+\n\
+Return Value(s): An RRset that contains one RDATA corresponding to the\n\
+next difference in the sequences.\n\
+";
+} // unnamed namespace
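To tie the two docstrings above together, here is a hedged sketch of reading
a journal. The exact get_journal_reader() call (zone name plus begin and end
serials) and its (result, reader) return shape are assumptions here, since
that method is documented elsewhere; the iteration protocol itself is
provided by the tp_iter/tp_iternext slots in the wrapper that follows.

    _, reader = dsc.get_journal_reader(isc.dns.Name("example.com"),
                                       1234, 1235)  # serials are placeholders
    if reader is not None:
        deleting = None
        for rr in reader:  # the reader object is iterable
            if rr.get_type() == isc.dns.RRType.SOA():
                # each SOA alternately starts the "delete" half or the
                # "add" half of a difference sequence
                deleting = True if deleting is None else not deleting
            print(("del " if deleting else "add ") + rr.to_text())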
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.cc b/src/lib/python/isc/datasrc/journal_reader_python.cc
new file mode 100644
index 0000000..ff398d1
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.cc
@@ -0,0 +1,200 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "journal_reader_python.h"
+
+#include "journal_reader_inc.cc"
+
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneJournalReader : public PyObject {
+public:
+ s_ZoneJournalReader() : cppobj(ZoneJournalReaderPtr()), base_obj(NULL) {};
+ ZoneJournalReaderPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we use INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
+};
+
+// General creation and destruction
+int
+ZoneJournalReader_init(PyObject*, PyObject*, PyObject*) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneJournalReader cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneJournalReader_destroy(PyObject* po_self) {
+ s_ZoneJournalReader* const self =
+ static_cast<s_ZoneJournalReader*>(po_self);
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneJournalReader_getNextDiff(PyObject* po_self, PyObject*) {
+ s_ZoneJournalReader* self = static_cast<s_ZoneJournalReader*>(po_self);
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextDiff();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::InvalidOperation& ex) {
+ PyErr_SetString(PyExc_ValueError, ex.what());
+ return (NULL);
+ } catch (const isc::Exception& isce) {
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneJournalReader_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneJournalReader_next(PyObject* self) {
+ PyObject* result = ZoneJournalReader_getNextDiff(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyMethodDef ZoneJournalReader_methods[] = {
+ { "get_next_diff", ZoneJournalReader_getNextDiff, METH_NOARGS,
+ ZoneJournalReader_getNextDiff_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject journal_reader_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneJournalReader",
+ sizeof(s_ZoneJournalReader), // tp_basicsize
+ 0, // tp_itemsize
+ ZoneJournalReader_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneJournalReader_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneJournalReader_iter, // tp_iter
+ ZoneJournalReader_next, // tp_iternext
+ ZoneJournalReader_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ ZoneJournalReader_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneJournalReaderObject(ZoneJournalReaderPtr source,
+ PyObject* base_obj)
+{
+ s_ZoneJournalReader* po = static_cast<s_ZoneJournalReader*>(
+ journal_reader_type.tp_alloc(&journal_reader_type, 0));
+ if (po != NULL) {
+ po->cppobj = source;
+ po->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
+ }
+ return (po);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.h b/src/lib/python/isc/datasrc/journal_reader_python.h
new file mode 100644
index 0000000..56344df
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_JOURNAL_READER_H
+#define __PYTHON_DATASRC_JOURNAL_READER_H 1
+
+#include <Python.h>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+extern PyTypeObject journal_reader_type;
+
+/// \brief Create a ZoneJournalReader python object
+///
+/// \param source The zone journal reader pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneJournalReader depends on
+/// Its refcount is increased, and will be decreased when
+/// this reader is destroyed, making sure that the
+/// base object is never destroyed before this reader.
+PyObject* createZoneJournalReaderObject(
+ isc::datasrc::ZoneJournalReaderPtr source,
+ PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_JOURNAL_READER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index fd63741..f9b47c0 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -23,6 +23,10 @@ RR_NAME_INDEX = 2
RR_TTL_INDEX = 4
RR_RDATA_INDEX = 7
+# Current major and minor versions of schema
+SCHEMA_MAJOR_VERSION = 2
+SCHEMA_MINOR_VERSION = 0
+
class Sqlite3DSError(Exception):
""" Define exceptions."""
pass
@@ -47,32 +51,46 @@ def create(cur):
cur.execute("SELECT version FROM schema_version")
row = cur.fetchone()
except sqlite3.OperationalError:
- cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
- cur.execute("INSERT INTO schema_version VALUES (1)")
+ cur.execute("""CREATE TABLE schema_version (version INTEGER NOT NULL,
+ minor INTEGER NOT NULL DEFAULT 0)""")
+ cur.execute("INSERT INTO schema_version VALUES (" +
+ str(SCHEMA_MAJOR_VERSION) + ", " +
+ str(SCHEMA_MINOR_VERSION) + ")")
cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
- name STRING NOT NULL COLLATE NOCASE,
- rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
+ name TEXT NOT NULL COLLATE NOCASE,
+ rdclass TEXT NOT NULL COLLATE NOCASE DEFAULT 'IN',
dnssec BOOLEAN NOT NULL DEFAULT 0)""")
cur.execute("CREATE INDEX zones_byname ON zones (name)")
cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
zone_id INTEGER NOT NULL,
- name STRING NOT NULL COLLATE NOCASE,
- rname STRING NOT NULL COLLATE NOCASE,
+ name TEXT NOT NULL COLLATE NOCASE,
+ rname TEXT NOT NULL COLLATE NOCASE,
ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- sigtype STRING COLLATE NOCASE,
- rdata STRING NOT NULL)""")
+ rdtype TEXT NOT NULL COLLATE NOCASE,
+ sigtype TEXT COLLATE NOCASE,
+ rdata TEXT NOT NULL)""")
cur.execute("CREATE INDEX records_byname ON records (name)")
cur.execute("CREATE INDEX records_byrname ON records (rname)")
+ cur.execute("""CREATE INDEX records_bytype_and_rname ON records
+ (rdtype, rname)""")
cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
zone_id INTEGER NOT NULL,
- hash STRING NOT NULL COLLATE NOCASE,
- owner STRING NOT NULL COLLATE NOCASE,
+ hash TEXT NOT NULL COLLATE NOCASE,
+ owner TEXT NOT NULL COLLATE NOCASE,
ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- rdata STRING NOT NULL)""")
+ rdtype TEXT NOT NULL COLLATE NOCASE,
+ rdata TEXT NOT NULL)""")
cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
- row = [1]
+ cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ version INTEGER NOT NULL,
+ operation INTEGER NOT NULL,
+ name TEXT NOT NULL COLLATE NOCASE,
+ rrtype TEXT NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdata TEXT NOT NULL)""")
+ cur.execute("SELECT version FROM schema_version")
+ row = cur.fetchone()
cur.execute("COMMIT TRANSACTION")
return row
@@ -107,8 +125,9 @@ def open(dbfile, connect_timeout=5.0):
row = create(cur)
conn.isolation_level = iso_lvl
- if row == None or row[0] != 1:
- raise Sqlite3DSError("Bad database schema version")
+ if row is None or row[0] != SCHEMA_MAJOR_VERSION:
+ bad_version = "(unknown)" if row is None else str(row[0])
+ raise Sqlite3DSError("Bad database schema version: " + bad_version)
return conn, cur
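For reference, a minimal sketch (not code from the patch) of what the
version check in open() above expects to find in a database created by the
new create(); the file name is a placeholder:

    import sqlite3

    conn = sqlite3.connect("zones.sqlite3")  # placeholder path
    cur = conn.cursor()
    cur.execute("SELECT version, minor FROM schema_version")
    major, minor = cur.fetchone()
    # open() raises Sqlite3DSError unless major == SCHEMA_MAJOR_VERSION (2)
    print("schema version %d.%d" % (major, minor))
    conn.close()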
diff --git a/src/lib/python/isc/datasrc/tests/.gitignore b/src/lib/python/isc/datasrc/tests/.gitignore
new file mode 100644
index 0000000..58ea8cd
--- /dev/null
+++ b/src/lib/python/isc/datasrc/tests/.gitignore
@@ -0,0 +1 @@
+/*.sqlite3.copied
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 411b5cc..c996f2a 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,11 +1,14 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
# old tests, TODO remove or change to use new API?
-#PYTESTS = master_test.py sqlite3_ds_test.py
-PYTESTS = datasrc_test.py
+#PYTESTS = master_test.py
+PYTESTS = datasrc_test.py sqlite3_ds_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
+EXTRA_DIST += testdata/newschema.sqlite3
+EXTRA_DIST += testdata/oldschema.sqlite3
+EXTRA_DIST += testdata/new_minor_schema.sqlite3
CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
@@ -33,5 +36,6 @@ endif
PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_srcdir)/testdata \
TESTDATA_WRITE_PATH=$(abs_builddir) \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 15fa347..74f822f 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -15,11 +15,14 @@
import isc.log
import isc.datasrc
-from isc.datasrc import ZoneFinder
-import isc.dns
+from isc.datasrc import ZoneFinder, ZoneJournalReader
+from isc.dns import *
+from isc.testutils.rrset_utils import rrsets_equal
import unittest
+import sqlite3
import os
import shutil
+import sys
import json
TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
@@ -38,19 +41,6 @@ def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
rrset_list.append(rrset_to_add)
-# helper function, we have no direct rrset comparison atm
-def rrsets_equal(a, b):
- # no accessor for sigs either (so this only checks name, class, type, ttl,
- # and rdata)
- # also, because of the fake data in rrsigs, if the type is rrsig, the
- # rdata is not checked
- return a.get_name() == b.get_name() and\
- a.get_class() == b.get_class() and\
- a.get_type() == b.get_type() and \
- a.get_ttl() == b.get_ttl() and\
- (a.get_type() == isc.dns.RRType.RRSIG() or
- sorted(a.get_rdata()) == sorted(b.get_rdata()))
-
# returns true if rrset is in expected_rrsets
# will remove the rrset from expected_rrsets if found
def check_for_rrset(expected_rrsets, rrset):
@@ -60,9 +50,71 @@ def check_for_rrset(expected_rrsets, rrset):
return True
return False
+def create_soa(serial):
+ soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ 'ns1.example.org. admin.example.org. ' +
+ str(serial) + ' 3600 1800 2419200 7200'))
+ return soa
+
+def test_findall_common(self, tested):
+ """
+ Common part of the find_all tests. It tests the find_all method on the
+ passed object.
+ """
+ # Some "failure" responses
+ result, rrset, _ = tested.find_all(isc.dns.Name("www.sql1.example.com"),
+ ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.DELEGATION, result)
+ expected = RRset(Name('sql1.example.com.'), RRClass.IN(), RRType.NS(),
+ RRTTL(3600))
+ expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ 'dns01.example.com.'))
+ expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ 'dns02.example.com.'))
+ expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ 'dns03.example.com.'))
+ self.assertTrue(rrsets_equal(expected, rrset))
+
+ result, rrset, _ = tested.find_all(isc.dns.Name("nxdomain.example.com"),
+ ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.NXDOMAIN, result)
+ self.assertIsNone(rrset)
+
+ # A success. It should return the list now.
+ # This also tests that we can omit the options parameter
+ result, rrsets, _ = tested.find_all(isc.dns.Name("mix.example.com."))
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual(2, len(rrsets))
+ rrsets.sort(key=lambda rrset: rrset.get_type().to_text())
+ expected = [
+ RRset(Name('mix.example.com.'), RRClass.IN(), RRType.A(),
+ RRTTL(3600)),
+ RRset(Name('mix.example.com.'), RRClass.IN(), RRType.AAAA(),
+ RRTTL(3600))
+ ]
+ expected[0].add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
+ expected[0].add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.2"))
+ expected[1].add_rdata(Rdata(RRType.AAAA(), RRClass.IN(),
+ "2001:db8::1"))
+ expected[1].add_rdata(Rdata(RRType.AAAA(), RRClass.IN(),
+ "2001:db8::2"))
+ for (rrset, exp) in zip(rrsets, expected):
+ self.assertTrue(rrsets_equal(exp, rrset))
+
+ # Check the reference counts on them. getrefcount() returns one more
+ # than the expected count, because the reference held by its own
+ # parameter is also counted; see its docs.
+
+ # Two - one for the variable, one for parameter
+ self.assertEqual(2, sys.getrefcount(rrsets))
+ for rrset in rrsets:
+ # 3 - one as the element of list, one for the rrset variable
+ # and one for the parameter.
+ self.assertEqual(3, sys.getrefcount(rrset))
+
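The reference-count expectations in the helper above rely on
sys.getrefcount() reporting one extra reference for its own argument; a tiny
illustration (not from the patch):

    import sys

    x = object()
    # one reference from the local name 'x', plus one held by the
    # getrefcount() call argument itself
    print(sys.getrefcount(x))  # typically prints 2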
class DataSrcClient(unittest.TestCase):
- def test_constructors(self):
+ def test_(self):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
@@ -86,7 +138,7 @@ class DataSrcClient(unittest.TestCase):
# for RRSIGS, the TTL's are currently modified. This test should
# start failing when we fix that.
- rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), True)
# we do not know the order in which they are returned by the iterator
# but we do want to check them, so we put all records into one list
@@ -113,7 +165,11 @@ class DataSrcClient(unittest.TestCase):
"256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
"N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
"TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
- "5fs0dE/xLztL/CzZ",
+ "5fs0dE/xLztL/CzZ"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
"257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
"KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
"ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
@@ -125,8 +181,16 @@ class DataSrcClient(unittest.TestCase):
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
[
- "dns01.example.com.",
- "dns02.example.com.",
+ "dns01.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns02.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
"dns03.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
@@ -136,9 +200,20 @@ class DataSrcClient(unittest.TestCase):
])
# For RRSIGS, we can't add the fake data through the API, so we
# simply pass no rdata at all (which is skipped by the check later)
+
+ # Since we passed separate_rrs = True to get_iterator, we get several
+ # sets of RRSIGs, one for each TTL
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
[
"master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
@@ -157,6 +232,8 @@ class DataSrcClient(unittest.TestCase):
])
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
# rrs is an iterator, but also has direct get_next_rrset(), use
# the latter one here
@@ -171,23 +248,55 @@ class DataSrcClient(unittest.TestCase):
# Now check there are none left
self.assertEqual(0, len(expected_rrset_list),
"RRset(s) not returned by iterator: " +
- str([rrset.to_text() for rrset in expected_rrset_list ]
+ str([rrset.get_name().to_text() + '/' +
+ rrset.get_type().to_text() for rrset in
+ expected_rrset_list ]
))
# TODO should we catch this (iterating past end) and just return None
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+ # Without the separate_rrs argument, it should return 72 RRsets
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
rrets = dsc.get_iterator(isc.dns.Name("example.com"))
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
- self.assertEqual(55, len(list(rrets)))
+ # There are 40 non-RRSIG RRsets and 32 distinct RRSIGs.
+ self.assertEqual(72, len(list(rrets)))
+
+ # same test, but now with explicit False argument for separate_rrs
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
+ # there are more than 80 RRs in this zone... let's just count them
+ # (already did a full check of the smaller zone above)
+ self.assertEqual(72, len(list(rrets)))
+
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
+ # there are more than 80 RRs in this zone... let's just count them
+ # (already did a full check of the smaller zone above)
+ self.assertEqual(84, len(list(rrets)))
# TODO should we catch this (iterating past end) and just return None
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+ def test_iterator_soa(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ iterator = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+ expected_soa = isc.dns.RRset(isc.dns.Name("sql1.example.com."),
+ isc.dns.RRClass.IN(),
+ isc.dns.RRType.SOA(),
+ isc.dns.RRTTL(3600))
+ expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA(),
+ isc.dns.RRClass.IN(),
+ "master.example.com. " +
+ "admin.example.com. 678 " +
+ "3600 1800 2419200 7200"))
+ self.assertTrue(rrsets_equal(expected_soa, iterator.get_soa()))
+
def test_construct(self):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
@@ -210,10 +319,28 @@ class DataSrcClient(unittest.TestCase):
self.assertNotEqual(ZoneFinder.NXDOMAIN, ZoneFinder.NXRRSET)
self.assertNotEqual(ZoneFinder.NXRRSET, ZoneFinder.CNAME)
self.assertNotEqual(ZoneFinder.CNAME, ZoneFinder.DNAME)
- self.assertNotEqual(ZoneFinder.DNAME, ZoneFinder.WILDCARD)
- self.assertNotEqual(ZoneFinder.WILDCARD, ZoneFinder.WILDCARD_CNAME)
- self.assertNotEqual(ZoneFinder.WILDCARD_CNAME,
- ZoneFinder.WILDCARD_NXRRSET)
+
+ def test_findresultflags(self):
+ '''A simple test just confirming the flags are all different.'''
+ self.assertNotEqual(ZoneFinder.RESULT_WILDCARD,
+ ZoneFinder.RESULT_NSEC_SIGNED)
+ self.assertNotEqual(ZoneFinder.RESULT_NSEC_SIGNED,
+ ZoneFinder.RESULT_NSEC3_SIGNED)
+ self.assertNotEqual(ZoneFinder.RESULT_NSEC3_SIGNED,
+ ZoneFinder.RESULT_WILDCARD)
+
+ def test_findall(self):
+ """
+ A test for the find_all method.
+ """
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ test_findall_common(self, finder)
def test_find(self):
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
@@ -223,114 +350,120 @@ class DataSrcClient(unittest.TestCase):
self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
- result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ # Check the optional parameters are optional
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A())
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset, _ = finder.find(isc.dns.Name("www.sql1.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.DELEGATION, result)
self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
"sql1.example.com. 3600 IN NS dns02.example.com.\n" +
"sql1.example.com. 3600 IN NS dns03.example.com.\n",
rrset.to_text())
- result, rrset = finder.find(isc.dns.Name("doesnotexist.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("doesnotexist.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
- result, rrset = finder.find(isc.dns.Name("www.some.other.domain"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
- self.assertEqual(finder.NXDOMAIN, result)
- self.assertEqual(None, rrset)
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.TXT(),
- None,
- finder.FIND_DEFAULT)
+ self.assertRaises(isc.datasrc.OutOfZone, finder.find,
+ isc.dns.Name("www.some.other.domain"),
+ isc.dns.RRType.A())
+
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.TXT(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.NXRRSET, result)
self.assertEqual(None, rrset)
- result, rrset = finder.find(isc.dns.Name("cname-ext.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.CNAME, result)
self.assertEqual(
"cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
rrset.to_text())
- result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
- self.assertEqual(finder.WILDCARD, result)
+ result, rrset, flags = \
+ finder.find(isc.dns.Name("foo.wild.example.com"),
+ isc.dns.RRType.A(), finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(finder.RESULT_WILDCARD, flags)
self.assertEqual("foo.wild.example.com. 3600 IN A 192.0.2.255\n",
rrset.to_text())
- result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
- isc.dns.RRType.TXT(),
- None,
- finder.FIND_DEFAULT)
- self.assertEqual(finder.WILDCARD_NXRRSET, result)
+ result, rrset, _ = finder.find(isc.dns.Name("foo.wild.example.com"),
+ isc.dns.RRType.TXT(),
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXRRSET, result)
+ self.assertTrue(finder.RESULT_WILDCARD, flags)
self.assertEqual(None, rrset)
self.assertRaises(TypeError, finder.find,
"foo",
isc.dns.RRType.A(),
- None,
finder.FIND_DEFAULT)
self.assertRaises(TypeError, finder.find,
isc.dns.Name("cname-ext.example.com"),
"foo",
- None,
finder.FIND_DEFAULT)
self.assertRaises(TypeError, finder.find,
isc.dns.Name("cname-ext.example.com"),
isc.dns.RRType.A(),
- None,
"foo")
- def test_find_previous(self):
- dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
-
- result, finder = dsc.find_zone(isc.dns.Name("example.com"))
- self.assertEqual(finder.SUCCESS, result)
-
- prev = finder.find_previous_name(isc.dns.Name("bbb.example.com"))
- self.assertEqual("example.com.", prev.to_text())
-
- prev = finder.find_previous_name(isc.dns.Name("zzz.example.com"))
- self.assertEqual("www.example.com.", prev.to_text())
-
- prev = finder.find_previous_name(prev)
- self.assertEqual("*.wild.example.com.", prev.to_text())
-
- self.assertRaises(isc.datasrc.NotImplemented,
- finder.find_previous_name,
- isc.dns.Name("com"))
-
class DataSrcUpdater(unittest.TestCase):
def setUp(self):
# Make a fresh copy of the writable database with all original content
shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ def test_findall(self):
+ """
+ The same test as DataSrcClient.test_findall, but on an updater
+ instead of a finder.
+ """
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ updater = dsc.get_updater(isc.dns.Name("example.com"), False)
+
+ test_findall_common(self, updater)
+
def test_construct(self):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+ def test_update_finder(self):
+ # Check basic behavior of updater's finder
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ updater = dsc.get_updater(isc.dns.Name("example.com"), False)
+ result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ # Omit optional parameters
+ result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A())
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
def test_update_delete_commit(self):
dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
@@ -341,10 +474,9 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -363,17 +495,15 @@ class DataSrcUpdater(unittest.TestCase):
# The record should be gone in the updater, but not in the original
# finder (since we have not committed)
- result, rrset = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -383,10 +513,9 @@ class DataSrcUpdater(unittest.TestCase):
self.assertRaises(isc.datasrc.Error, updater.commit)
# the record should be gone now in the 'real' finder as well
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -398,10 +527,9 @@ class DataSrcUpdater(unittest.TestCase):
# second commit should throw
self.assertRaises(isc.datasrc.Error, updater.commit)
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -423,10 +551,9 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -447,10 +574,9 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -469,10 +595,9 @@ class DataSrcUpdater(unittest.TestCase):
# The record should be gone in the updater, but not in the original
# finder (since we have not committed)
- result, rrset = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -480,10 +605,9 @@ class DataSrcUpdater(unittest.TestCase):
updater = None
# the record should still be available in the 'real' finder as well
- result, rrset = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
- None,
- finder.FIND_DEFAULT)
+ result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -494,6 +618,250 @@ class DataSrcUpdater(unittest.TestCase):
dsc.get_updater(isc.dns.Name("notexistent.example"),
True))
+ def test_client_reference(self):
+ # Temporarily create various objects using factory methods of the
+ # client. The created objects won't be stored anywhere and
+ # immediately released. The creation shouldn't affect the reference
+ # to the base client.
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ orig_ref = sys.getrefcount(dsc)
+
+ dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+ dsc.get_iterator(isc.dns.Name("example.com."))
+ self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+ dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+ def test_iterate_over_empty_zone(self):
+ # empty the test zone first
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.commit()
+
+ # Check the iterator behavior for the empty zone.
+ iterator = dsc.get_iterator(isc.dns.Name("example.com."))
+ self.assertEqual(None, iterator.get_soa())
+ self.assertEqual(None, iterator.get_next_rrset())
+
+class JournalWrite(unittest.TestCase):
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+
+ def tearDown(self):
+ self.dsc = None
+ self.updater = None
+
+ def check_journal(self, expected_list):
+ # This assumes sqlite3 DB and directly fetches stored data from
+ # the DB file. It should be generalized using ZoneJournalReader
+ # once it's supported.
+ conn = sqlite3.connect(WRITE_ZONE_DB_FILE)
+ cur = conn.cursor()
+ cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+ actual_list = cur.fetchall()
+ self.assertEqual(len(expected_list), len(actual_list))
+ for (expected, actual) in zip(expected_list, actual_list):
+ self.assertEqual(expected, actual)
+ conn.close()
+
+ def create_a(self, address):
+ a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+ RRTTL(3600))
+ a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ return (a_rr)
+
+ def test_journal_write(self):
+ # This is a straightforward port of the C++ 'journal' test
+ # Note: we add/delete 'out of zone' data (example.org in the
+ # example.com zone) for convenience.
+ self.updater.delete_rrset(create_soa(1234))
+ self.updater.delete_rrset(self.create_a('192.0.2.2'))
+ self.updater.add_rrset(create_soa(1235))
+ self.updater.add_rrset(self.create_a('192.0.2.2'))
+ self.updater.commit()
+
+ expected = []
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ "1234 3600 1800 2419200 7200"))
+ expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ "1235 3600 1800 2419200 7200"))
+ expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+ self.check_journal(expected)
+
+ def test_journal_write_multiple(self):
+ # This is a straightforward port of the C++ 'journalMultiple' test
+ expected = []
+ for i in range(1, 100):
+ self.updater.delete_rrset(create_soa(1234 + i - 1))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ str(1234 + i - 1) + " 3600 1800 2419200 7200"))
+ self.updater.add_rrset(create_soa(1234 + i))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ str(1234 + i) + " 3600 1800 2419200 7200"))
+ self.updater.commit()
+ self.check_journal(expected)
+
+ def test_journal_write_bad_sequence(self):
+ # This is a straightforward port of the C++ 'journalBadSequence' test
+
+ # Delete A before SOA
+ self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+ self.create_a('192.0.2.1'))
+ # Add before delete
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ create_soa(1234))
+ # Add A before SOA
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ self.create_a('192.0.2.1'))
+ # Commit before add
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.commit)
+ # Delete two SOAs
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+ create_soa(1235))
+ # Add two SOAs
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.updater.add_rrset(create_soa(1235))
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ create_soa(1236))
+
+ def test_journal_write_onerase(self):
+ self.updater = None
+ self.assertRaises(isc.datasrc.Error, self.dsc.get_updater,
+ Name("example.com"), True, True)
+
+ def test_journal_write_badparam(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ self.assertRaises(TypeError, dsc.get_updater, 0, False, True)
+ self.assertRaises(TypeError, dsc.get_updater, Name('example.com'),
+ False, 0)
+ self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
+ 1, True)
+
+class JournalRead(unittest.TestCase):
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ self.zname = Name('example.com')
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self.reader = None
+
+ def tearDown(self):
+ # Some tests leave the reader in the middle of a sequence, holding
+ # the lock. Since the unittest framework keeps each test object
+ # until the end of the entire test run, we need to make sure the reader
+ # is released at the end of each test. The client shouldn't do harm
+ # but we clean it up, too, just in case.
+ self.dsc = None
+ self.reader = None
+
+ def make_simple_diff(self, begin_soa):
+ updater = self.dsc.get_updater(self.zname, False, True)
+ updater.delete_rrset(begin_soa)
+ updater.add_rrset(create_soa(1235))
+ updater.commit()
+
+ def test_journal_reader(self):
+ # This is a straightforward port of the C++ 'journalReader' test
+ self.make_simple_diff(create_soa(1234))
+ result, self.reader = self.dsc.get_journal_reader(self.zname, 1234,
+ 1235)
+ self.assertEqual(ZoneJournalReader.SUCCESS, result)
+ self.assertNotEqual(None, self.reader)
+ rrsets_equal(create_soa(1234), self.reader.get_next_diff())
+ rrsets_equal(create_soa(1235), self.reader.get_next_diff())
+ self.assertEqual(None, self.reader.get_next_diff())
+ self.assertRaises(ValueError, self.reader.get_next_diff)
+
+ def test_journal_reader_with_large_serial(self):
+ # similar to the previous one, but use a very large serial to check
+ # if the python wrapper code has unexpected integer overflow
+ self.make_simple_diff(create_soa(4294967295))
+ result, self.reader = self.dsc.get_journal_reader(self.zname,
+ 4294967295, 1235)
+ self.assertNotEqual(None, self.reader)
+ # dump to text and compare them in case create_soa happens to have
+ # an overflow bug
+ self.assertEqual('example.org. 3600 IN SOA ns1.example.org. ' + \
+ 'admin.example.org. 4294967295 3600 1800 ' + \
+ '2419200 7200\n',
+ self.reader.get_next_diff().to_text())
+
+ def test_journal_reader_large_journal(self):
+ # This is a straightforward port of the C++ 'readLargeJournal' test.
+ # In this test we use the ZoneJournalReader object as a Python
+ # iterator.
+ updater = self.dsc.get_updater(self.zname, False, True)
+ expected = []
+ for i in range(0, 100):
+ rrset = create_soa(1234 + i)
+ updater.delete_rrset(rrset)
+ expected.append(rrset)
+
+ rrset = create_soa(1234 + i + 1)
+ updater.add_rrset(rrset)
+ expected.append(rrset)
+
+ updater.commit()
+ _, self.reader = self.dsc.get_journal_reader(self.zname, 1234, 1334)
+ self.assertNotEqual(None, self.reader)
+ i = 0
+ for rr in self.reader:
+ self.assertNotEqual(len(expected), i)
+ rrsets_equal(expected[i], rr)
+ i += 1
+ self.assertEqual(len(expected), i)
+
+ def test_journal_reader_no_range(self):
+ # This is a straightforward port of the C++ 'readJournalForNoRange'
+ # test
+ self.make_simple_diff(create_soa(1234))
+ result, self.reader = self.dsc.get_journal_reader(self.zname, 1200,
+ 1235)
+ self.assertEqual(ZoneJournalReader.NO_SUCH_VERSION, result)
+ self.assertEqual(None, self.reader)
+
+ def test_journal_reader_no_zone(self):
+ # This is a straightforward port of the C++ 'journalReaderForNXZone'
+ # test
+ result, self.reader = self.dsc.get_journal_reader(Name('nosuchzone'),
+ 0, 1)
+ self.assertEqual(ZoneJournalReader.NO_SUCH_ZONE, result)
+ self.assertEqual(None, self.reader)
+
+ def test_journal_reader_bad_params(self):
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ 'example.com.', 0, 1)
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ self.zname, 'must be int', 1)
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ self.zname, 0, 'must be int')
+
+ def test_journal_reader_direct_construct(self):
+ # ZoneJournalReader can only be constructed via a factory
+ self.assertRaises(TypeError, ZoneJournalReader)
+
if __name__ == "__main__":
isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
index 10c61cf..5604c32 100644
--- a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
+++ b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
@@ -22,122 +22,18 @@ import sqlite3
TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
-READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
-BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
-WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
-NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
-
-def example_reader():
- my_zone = [
- ("example.com.", "3600", "IN", "SOA", "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200"),
- ("example.com.", "3600", "IN", "NS", "ns.example.com."),
- ("ns.example.com.", "3600", "IN", "A", "192.0.2.1")
- ]
- for rr in my_zone:
- yield rr
-
-def example_reader_nested():
- # this iterator is used in the 'locked' test; it will cause
- # the load() method to try and write to the same database
- sqlite3_ds.load(WRITE_ZONE_DB_FILE,
- ".",
- example_reader)
- return example_reader()
-
-class TestSqlite3_ds(unittest.TestCase):
- def test_zone_exist(self):
- # The following file must be non existent and must be non
- # "creatable"; the sqlite3 library will try to create a new
- # DB file if it doesn't exist, so to test a failure case the
- # create operation should also fail. The "nodir", a non
- # existent directory, is inserted for this purpose.
- nodir = "/nodir/notexist"
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.zone_exist, "example.com", nodir)
- # Open a broken database file
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.zone_exist, "example.com",
- BROKEN_DB_FILE)
- self.assertTrue(sqlite3_ds.zone_exist("example.com.",
- READ_ZONE_DB_FILE))
- self.assertFalse(sqlite3_ds.zone_exist("example.org.",
- READ_ZONE_DB_FILE))
-
- def test_load_db(self):
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
-
- def test_locked_db(self):
- # load it first to make sure it exists
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
-
- # and manually create a writing session as well
- con = sqlite3.connect(WRITE_ZONE_DB_FILE);
- cur = con.cursor()
- cur.execute("delete from records")
-
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.load, WRITE_ZONE_DB_FILE, ".",
- example_reader)
-
- con.rollback()
-
- # and make sure lock does not stay
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
-
- # force locked db by nested loads
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.load, WRITE_ZONE_DB_FILE, ".",
- example_reader_nested)
-
- # and make sure lock does not stay
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+DBFILE_NEWSCHEMA = TESTDATA_PATH + "newschema.sqlite3"
+DBFILE_OLDSCHEMA = TESTDATA_PATH + "oldschema.sqlite3"
+DBFILE_NEW_MINOR_SCHEMA = TESTDATA_PATH + "new_minor_schema.sqlite3"
class NewDBFile(unittest.TestCase):
- def tearDown(self):
- # remove the created database after every test
- if (os.path.exists(NEW_DB_FILE)):
- os.remove(NEW_DB_FILE)
-
- def setUp(self):
- # remove the created database before every test too, just
- # in case a test got aborted half-way, and cleanup didn't occur
- if (os.path.exists(NEW_DB_FILE)):
- os.remove(NEW_DB_FILE)
-
- def test_new_db(self):
- self.assertFalse(os.path.exists(NEW_DB_FILE))
- sqlite3_ds.open(NEW_DB_FILE)
- self.assertTrue(os.path.exists(NEW_DB_FILE))
-
- def test_new_db_locked(self):
- self.assertFalse(os.path.exists(NEW_DB_FILE))
- con = sqlite3.connect(NEW_DB_FILE);
- con.isolation_level = None
- cur = con.cursor()
- cur.execute("BEGIN IMMEDIATE TRANSACTION")
-
- # load should now fail, since the database is locked,
- # and the open() call needs an exclusive lock
- self.assertRaises(sqlite3.OperationalError,
- sqlite3_ds.open, NEW_DB_FILE, 0.1)
-
- con.rollback()
- cur.close()
- con.close()
- self.assertTrue(os.path.exists(NEW_DB_FILE))
-
- # now that we closed our connection, load should work again
- sqlite3_ds.open(NEW_DB_FILE)
-
- # the database should now have been created, and a new load should
- # not require an exclusive lock anymore, so we lock it again
- con = sqlite3.connect(NEW_DB_FILE);
- cur = con.cursor()
- cur.execute("BEGIN IMMEDIATE TRANSACTION")
- sqlite3_ds.open(NEW_DB_FILE, 0.1)
- con.rollback()
- cur.close()
- con.close()
+ def test_different_version(self):
+ self.assertTrue(os.path.exists(DBFILE_NEWSCHEMA))
+ self.assertRaises(sqlite3_ds.Sqlite3DSError, sqlite3_ds.open,
+ DBFILE_NEWSCHEMA)
+ self.assertRaises(sqlite3_ds.Sqlite3DSError, sqlite3_ds.open,
+ DBFILE_OLDSCHEMA)
+ self.assertNotEqual(None, sqlite3_ds.open(DBFILE_NEW_MINOR_SCHEMA)[0])
if __name__ == '__main__':
unittest.main()
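The replacement test above relies on the schema-version checks in sqlite3_ds.open(): a major schema mismatch (whether older or newer) raises Sqlite3DSError, while a newer minor version is accepted and the call returns a tuple whose first element is the usable handle (the test checks this via [0]). A minimal usage sketch, assuming the usual "from isc.datasrc import sqlite3_ds" import and a placeholder database path:

from isc.datasrc import sqlite3_ds

def open_zone_db(db_file):
    # db_file is a placeholder path; a major schema mismatch (older or
    # newer) raises Sqlite3DSError, a newer minor version is accepted.
    try:
        db = sqlite3_ds.open(db_file)
    except sqlite3_ds.Sqlite3DSError as exc:
        print("cannot use this database:", exc)
        return None
    return db[0]    # first element of the returned tuple is the handle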
diff --git a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3
index cc8cfc3..9c71cb5 100644
Binary files a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 and b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3
new file mode 100644
index 0000000..1542c20
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3
new file mode 100644
index 0000000..460cfa8
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3
new file mode 100644
index 0000000..b44c5eb
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
index e447622..97ffa00 100644
--- a/src/lib/python/isc/datasrc/updater_python.cc
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -48,6 +48,7 @@ using namespace isc::datasrc::python;
namespace isc_datasrc_internal {
// See finder_python.cc
PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args);
+PyObject* ZoneFinder_helper_all(ZoneFinder* finder, PyObject* args);
}
namespace {
@@ -74,7 +75,7 @@ typedef CPPPyObjectContainer<s_ZoneUpdater, ZoneUpdater> ZoneUpdaterContainer;
// General creation and destruction
int
-ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
+ZoneUpdater_init(PyObject*, PyObject*, PyObject*) {
// can't be called directly
PyErr_SetString(PyExc_TypeError,
"ZoneUpdater cannot be constructed directly");
@@ -83,7 +84,9 @@ ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
}
void
-ZoneUpdater_destroy(s_ZoneUpdater* const self) {
+ZoneUpdater_destroy(PyObject* po_self) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+
// cppobj is a shared ptr, but to make sure things are not destroyed in
// the wrong order, we reset it here.
self->cppobj.reset();
@@ -185,6 +188,13 @@ ZoneUpdater_find(PyObject* po_self, PyObject* args) {
args));
}
+PyObject*
+ZoneUpdater_find_all(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper_all(
+ &self->cppobj->getFinder(), args));
+}
+
// This list contains the actual set of functions we have in
// python. Each entry has
// 1. Python method name
@@ -192,22 +202,22 @@ ZoneUpdater_find(PyObject* po_self, PyObject* args) {
// 3. Argument type
// 4. Documentation
PyMethodDef ZoneUpdater_methods[] = {
- { "add_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_addRRset),
+ { "add_rrset", ZoneUpdater_addRRset,
METH_VARARGS, ZoneUpdater_addRRset_doc },
- { "delete_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_deleteRRset),
+ { "delete_rrset", ZoneUpdater_deleteRRset,
METH_VARARGS, ZoneUpdater_deleteRRset_doc },
- { "commit", reinterpret_cast<PyCFunction>(ZoneUpdater_commit), METH_NOARGS,
- ZoneUpdater_commit_doc },
+ { "commit", ZoneUpdater_commit, METH_NOARGS, ZoneUpdater_commit_doc },
// Instead of a getFinder, we implement the finder functionality directly
// This is because ZoneFinder is non-copyable, and we should not create
// a ZoneFinder object from a reference only (which is what is returned
// by getFinder(). Apart from that
- { "get_origin", reinterpret_cast<PyCFunction>(ZoneUpdater_getOrigin),
+ { "get_origin", ZoneUpdater_getOrigin,
METH_NOARGS, ZoneFinder_getOrigin_doc },
- { "get_class", reinterpret_cast<PyCFunction>(ZoneUpdater_getClass),
+ { "get_class", ZoneUpdater_getClass,
METH_NOARGS, ZoneFinder_getClass_doc },
- { "find", reinterpret_cast<PyCFunction>(ZoneUpdater_find), METH_VARARGS,
- ZoneFinder_find_doc },
+ { "find", ZoneUpdater_find, METH_VARARGS, ZoneFinder_find_doc },
+ { "find_all", ZoneUpdater_find_all, METH_VARARGS,
+ ZoneFinder_findAll_doc },
{ NULL, NULL, 0, NULL }
};
@@ -221,7 +231,7 @@ PyTypeObject zoneupdater_type = {
"datasrc.ZoneUpdater",
sizeof(s_ZoneUpdater), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(ZoneUpdater_destroy),// tp_dealloc
+ ZoneUpdater_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -252,7 +262,7 @@ PyTypeObject zoneupdater_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(ZoneUpdater_init),// tp_init
+ ZoneUpdater_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
@@ -270,15 +280,16 @@ PyObject*
createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
PyObject* base_obj)
{
- s_ZoneUpdater* py_zi = static_cast<s_ZoneUpdater*>(
+ s_ZoneUpdater* py_zu = static_cast<s_ZoneUpdater*>(
zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
- if (py_zi != NULL) {
- py_zi->cppobj = source;
- }
- if (base_obj != NULL) {
- Py_INCREF(base_obj);
+ if (py_zu != NULL) {
+ py_zu->cppobj = source;
+ py_zu->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
}
- return (py_zi);
+ return (py_zu);
}
} // namespace python
diff --git a/src/lib/python/isc/ddns/Makefile.am b/src/lib/python/isc/ddns/Makefile.am
new file mode 100644
index 0000000..1b9b6df
--- /dev/null
+++ b/src/lib/python/isc/ddns/Makefile.am
@@ -0,0 +1,23 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py session.py logger.py zone_config.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = libddns_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.pyc
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/libddns_messages.py: libddns_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libddns_messages.mes
+
+pythondir = $(pyexecdir)/isc/ddns
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/ddns/__init__.py b/src/lib/python/isc/ddns/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/ddns/libddns_messages.mes b/src/lib/python/isc/ddns/libddns_messages.mes
new file mode 100644
index 0000000..7e34e70
--- /dev/null
+++ b/src/lib/python/isc/ddns/libddns_messages.mes
@@ -0,0 +1,214 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the libddns_messages python module.
+
+% LIBDDNS_DATASRC_ERROR update client %1 failed due to data source error: %2
+An update attempt failed due to some error in the corresponding data
+source. This is generally an unexpected event, but can still happen
+for various reasons such as DB lock contention or a failure of the
+backend DB server. The cause of the error is also logged. It's
+advisable to check the message, and, if necessary, take an appropriate
+action (e.g., restarting the DB server if it dies). If this message
+is logged, the data source isn't modified by the
+corresponding update request. When this library is used by b10-ddns, the server
+will return a response with an RCODE of SERVFAIL.
+
+% LIBDDNS_PREREQ_FORMERR update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL.
+The prerequisite with the given name, class and type is not well-formed.
+The specific prerequisite is shown. In this case, it has a non-zero TTL value.
+A FORMERR error response is sent to the client.
+
+% LIBDDNS_PREREQ_FORMERR_ANY update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found.
+The prerequisite with the given name, class and type is not well-formed.
+The specific prerequisite is shown. In this case, it either has a non-zero
+TTL value, or has rdata fields. A FORMERR error response is sent to the client.
+
+% LIBDDNS_PREREQ_FORMERR_CLASS update client %1 for zone %2: Format error in prerequisite (%3). Bad class.
+The prerequisite with the given name, class and type is not well-formed.
+The specific prerequisite is shown. In this case, the class of the
+prerequisite should either match the class of the zone in the Zone Section,
+or it should be ANY or NONE, and it is not. A FORMERR error response is sent
+to the client.
+
+% LIBDDNS_PREREQ_FORMERR_NONE update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL or rdata found.
+The prerequisite with the given name, class and type is not well-formed.
+The specific prerequisite is shown. In this case, it either has a non-zero
+TTL value, or has rdata fields. A FORMERR error response is sent to the client.
+
+% LIBDDNS_PREREQ_NAME_IN_USE_FAILED update client %1 for zone %2: 'Name is in use' prerequisite not satisfied (%3), rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'Name is in use'. From RFC2136:
+Name is in use. At least one RR with a specified NAME (in
+the zone and class specified by the Zone Section) must exist.
+Note that this prerequisite is NOT satisfied by empty
+nonterminals.
+
+% LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED update client %1 for zone %2: 'Name is not in use' (%3) prerequisite not satisfied, rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'Name is not in use'.
+From RFC2136:
+Name is not in use. No RR of any type is owned by a
+specified NAME. Note that this prerequisite IS satisfied by
+empty nonterminals.
+
+% LIBDDNS_PREREQ_NOTZONE update client %1 for zone %2: prerequisite not in zone (%3)
+A DDNS UPDATE prerequisite has a name that does not appear to be inside
+the zone specified in the Zone section of the UPDATE message.
+The specific prerequisite is shown. A NOTZONE error response is sent to
+the client.
+
+% LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED update client %1 for zone %2: 'RRset does not exist' (%3) prerequisite not satisfied, rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'RRset does not exist'.
+From RFC2136:
+RRset does not exist. No RRs with a specified NAME and TYPE
+(in the zone and class denoted by the Zone Section) can exist.
+
+% LIBDDNS_PREREQ_RRSET_EXISTS_FAILED update client %1 for zone %2: 'RRset exists (value independent)' (%3) prerequisite not satisfied, rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'RRset exists (value independent)'.
+From RFC2136:
+RRset exists (value independent). At least one RR with a
+specified NAME and TYPE (in the zone and class specified by
+the Zone Section) must exist.
+
+% LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED update client %1 for zone %2: 'RRset exists (value dependent)' (%3) prerequisite not satisfied, rcode: %4
+A DNS UPDATE prerequisite was not satisfied. The specific prerequisite that
+was not satisfied is shown. The client is sent an error response with the
+given rcode.
+In this case, the specific prerequisite is 'RRset exists (value dependent)'.
+From RFC2136:
+RRset exists (value dependent). A set of RRs with a
+specified NAME and TYPE exists and has the same members
+with the same RDATAs as the RRset specified here in this
+Section.
+
+% LIBDDNS_UPDATE_ADD_BAD_TYPE update client %1 for zone %2: update addition RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to add a record of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_APPROVED update client %1 for zone %2 approved
+Debug message. An update request was approved in terms of the zone's
+update ACL.
+
+% LIBDDNS_UPDATE_BAD_CLASS update client %1 for zone %2: bad class in update RR: %3
+The Update section of a DDNS update message contains an RRset with
+a bad class. The class of the update RRset must be either the same
+as the class in the Zone Section, ANY, or NONE.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1
+An error occurred while committing the DDNS update changes to the
+datasource. The specific error is printed. A SERVFAIL response is sent
+back to the client.
+
+% LIBDDNS_UPDATE_DELETE_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to delete an rrset of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3
+The Update section of a DDNS update message contains a 'delete rrset'
+statement with a non-zero TTL. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY update client %1 for zone %2: update deletion RR contains data %3
+The Update section of a DDNS update message contains a 'delete rrset'
+statement with a non-empty RRset. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to delete one or more rrs of an invalid type. Most
+likely the records have an RRType that is considered a 'meta'
+type, which cannot be zone content data. The specific record is
+shown. A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3
+The Update section of a DDNS update message contains a 'delete rrs'
+statement with a non-zero TTL. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DENIED update client %1 for zone %2 denied
+Informational message. An update request was denied because it was
+rejected by the zone's update ACL. When this library is used by
+b10-ddns, the server will respond to the request with an RCODE of
+REFUSED as described in Section 3.3 of RFC2136.
+
+% LIBDDNS_UPDATE_DROPPED update client %1 for zone %2 dropped
+Informational message. An update request was dropped because it was
+rejected by the zone's update ACL. When this library is used by
+b10-ddns, the server will then completely ignore the request; no
+response will be sent.
+
+% LIBDDNS_UPDATE_ERROR update client %1 for zone %2: %3
+Debug message. An error is found in processing a dynamic update
+request. This log message is used for general errors that are not
+normally expected to happen. So, in general, it would mean some
+problem in the client implementation or an interoperability issue
+with this implementation. The client's address, the zone name and
+class, and description of the error are logged.
+
+% LIBDDNS_UPDATE_FORWARD_FAIL update client %1 for zone %2: update forwarding not supported
+Debug message. An update request is sent to a secondary server. This
+is not necessarily invalid, but this implementation does not yet
+support update forwarding as specified in Section 6 of RFC2136 and it
+will simply return a response with an RCODE of NOTIMP to the client.
+The client's address and the zone name/class are logged.
+
+% LIBDDNS_UPDATE_NOTAUTH update client %1 for zone %2: not authoritative for update zone
+Debug message. An update request was received for a zone for which
+the receiving server doesn't have authority. In theory this is an
+unexpected event, but there are client implementations that could send
+update requests carelessly, so it may not necessarily be so uncommon
+in practice. If possible, you may want to check the implementation or
+configuration of those clients to suppress the requests. As specified
+in Section 3.1 of RFC2136, the receiving server will return a response
+with an RCODE of NOTAUTH.
+
+% LIBDDNS_UPDATE_NOTZONE update client %1 for zone %2: update RR out of zone %3
+A DDNS UPDATE record has a name that does not appear to be inside
+the zone specified in the Zone section of the UPDATE message.
+The specific update record is shown. A NOTZONE error response is
+sent to the client.
+
+% LIBDDNS_UPDATE_PREREQUISITE_FAILED prerequisite failed in update client %1 for zone %2: result code %3
+The handling of the prerequisite section (RFC2136 Section 3.2) found
+that one of the prerequisites was not satisfied. The result code
+should give more information on what prerequisite type failed.
+If the result code is FORMERR, the prerequisite section was not well-formed.
+An error response with the given result code is sent back to the client.
+
+% LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION update client %1 for zone %2: uncaught exception while processing update section: %3
+An uncaught exception was encountered while processing the Update
+section of a DDNS message. The specific exception is shown in the log message.
+To make sure DDNS service is not interrupted, this problem is caught instead
+of being reraised; the update is aborted, and a SERVFAIL is sent back to the client.
+This is most probably a bug in the DDNS code, but *could* be caused by
+the data source.
diff --git a/src/lib/python/isc/ddns/logger.py b/src/lib/python/isc/ddns/logger.py
new file mode 100644
index 0000000..0f95bd7
--- /dev/null
+++ b/src/lib/python/isc/ddns/logger.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+""" This is a logging utility module for other modules of the ddns library
+package.
+
+"""
+
+import isc.log
+
+# The logger for this package
+logger = isc.log.Logger('libddns')
+
+class ClientFormatter:
+ """A utility class to convert a client address to string.
+
+ This class is constructed with a Python standard socket address tuple.
+ If it's a 2-element tuple, it's assumed to be an IPv4 socket address
+ and will be converted to the form of '<addr>:<port>(/key=<tsig-key>)'.
+ If it's a 4-element tuple, it's assumed to be an IPv6 socket address
+ and will be converted to the form of '[<addr>]:<port>(/key=<tsig-key>)'.
+ The optional key=<tsig-key> will be added if a TSIG record is given
+ on construction. tsig-key is the TSIG key name in that case.
+
+ This class is designed to delay the conversion until it's explicitly
+ requested, so the conversion doesn't happen if the corresponding log
+ message is suppressed because of its log level (which is often the case
+ for debug messages).
+
+ Note: this optimization comes with the cost of instantiating the
+ formatter object itself. It's not really clear which overhead is
+ heavier, and we may conclude it's actually better to just generate
+ the strings unconditionally. Alternatively, we can make the stored
+ address of this object replaceable so that this object can be reused.
+ Right now this is an open issue.
+
+ """
+ def __init__(self, addr, tsig_record=None):
+ self.__addr = addr
+ self.__tsig_record = tsig_record
+
+ def __format_addr(self):
+ if len(self.__addr) == 2:
+ return self.__addr[0] + ':' + str(self.__addr[1])
+ elif len(self.__addr) == 4:
+ return '[' + self.__addr[0] + ']:' + str(self.__addr[1])
+ return None
+
+ def __str__(self):
+ format = self.__format_addr()
+ if format is not None and self.__tsig_record is not None:
+ format += '/key=' + self.__tsig_record.get_name().to_text(True)
+ return format
+
+class ZoneFormatter:
+ """A utility class to convert zone name and class to string.
+
+ This class is constructed with a name of a zone (isc.dns.Name object)
+ and its RR class (isc.dns.RRClass object). Its text conversion method
+ (__str__) converts them into a string in the form of
+ '<zone name>/<zone class>' where the trailing dot of the zone name
+ is omitted.
+
+ If the given zone name on construction is None, it's assumed that
+ the zone isn't identified but still needs to be logged somehow. The conversion
+ method returns a special string to indicate this case.
+
+ This class is designed to delay the conversion until it's explicitly
+ requested, so the conversion doesn't happen if the corresponding log
+ message is suppressed because of its log level (which is often the case
+ for debug messages).
+
+ See the note for the ClientFormatter class about overhead tradeoff.
+ This class shares the same discussion.
+
+ """
+ def __init__(self, zname, zclass):
+ self.__zname = zname
+ self.__zclass = zclass
+
+ def __str__(self):
+ if self.__zname is None:
+ return '(zone unknown/not determined)'
+ return self.__zname.to_text(True) + '/' + self.__zclass.to_text()
+
+class RRsetFormatter:
+ """A utility class to convert rrsets to a short descriptive string.
+
+ This class is constructed with an rrset (isc.dns.RRset object).
+ Its text conversion method (__str__) converts it into a string
+ with only the name, class and type of the rrset.
+ This is used in logging so that the RRset can be identified, without
+ being completely printed, which would result in an unnecessary
+ multi-line message.
+
+ This class is designed to delay the conversion until it's explicitly
+ requested, so the conversion doesn't happen if the corresponding log
+ message is suppressed because of its log level.
+
+ See the note for the ClientFormatter class about overhead tradeoff.
+ This class shares the same discussion.
+ """
+ def __init__(self, rrset):
+ self.__rrset = rrset
+
+ def __str__(self):
+ return self.__rrset.get_name().to_text() + " " +\
+ self.__rrset.get_class().to_text() + " " +\
+ self.__rrset.get_type().to_text()
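To illustrate the address forms and the deferred conversion described in the docstrings above, here is a small usage sketch; the addresses, zone name and log call are examples only, and it assumes isc.log has been initialized as in the unit tests:

from isc.dns import Name, RRClass
from isc.ddns.logger import logger, ClientFormatter, ZoneFormatter
from isc.log_messages.libddns_messages import LIBDDNS_UPDATE_APPROVED

ipv4_client = ClientFormatter(('192.0.2.1', 53000))          # "192.0.2.1:53000"
ipv6_client = ClientFormatter(('2001:db8::1', 53000, 0, 0))  # "[2001:db8::1]:53000"

# The formatter objects are passed to the logger as-is; their __str__ is
# only invoked if the debug message is actually emitted at this level.
logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_APPROVED,
             ipv4_client, ZoneFormatter(Name('example.org'), RRClass.IN()))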
diff --git a/src/lib/python/isc/ddns/session.py b/src/lib/python/isc/ddns/session.py
new file mode 100644
index 0000000..366bc8b
--- /dev/null
+++ b/src/lib/python/isc/ddns/session.py
@@ -0,0 +1,864 @@
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from isc.dns import *
+import isc.ddns.zone_config
+from isc.log import *
+from isc.ddns.logger import logger, ClientFormatter, ZoneFormatter,\
+ RRsetFormatter
+from isc.log_messages.libddns_messages import *
+from isc.datasrc import ZoneFinder
+import isc.xfrin.diff
+from isc.acl.acl import ACCEPT, REJECT, DROP
+import copy
+
+# Result codes for UpdateSession.handle()
+UPDATE_SUCCESS = 0
+UPDATE_ERROR = 1
+UPDATE_DROP = 2
+
+# Convenient aliases of update-specific section names
+SECTION_ZONE = Message.SECTION_QUESTION
+SECTION_PREREQUISITE = Message.SECTION_ANSWER
+SECTION_UPDATE = Message.SECTION_AUTHORITY
+
+# Shortcut
+DBGLVL_TRACE_BASIC = logger.DBGLVL_TRACE_BASIC
+
+class UpdateError(Exception):
+ '''Exception for general error in update request handling.
+
+ This exception is intended to be used internally within this module.
+ When UpdateSession.handle() encounters an error in handling an update
+ request it can raise this exception to terminate the handling.
+
+ This class is constructed with some information that may be useful for
+ subsequent possible logging:
+ - msg (string) A string explaining the error.
+ - zname (isc.dns.Name) The zone name. Can be None when not identified.
+ - zclass (isc.dns.RRClass) The zone class. Like zname, can be None.
+ - rcode (isc.dns.RCode or None) The RCODE to be set in the response
+ message; this can be None if the response is not expected to be sent.
+ - nolog (bool) If True, it indicates there's no more need for logging.
+
+ '''
+ def __init__(self, msg, zname, zclass, rcode, nolog=False):
+ Exception.__init__(self, msg)
+ self.zname = zname
+ self.zclass = zclass
+ self.rcode = rcode
+ self.nolog = nolog
+
+def foreach_rr(rrset):
+ '''
+ Generator that creates a new RRset with one RR from
+ the given RRset upon each iteration, usable in calls that
+ need to loop over an RRset and perform an action with each
+ of the individual RRs in it.
+ Example:
+ for rr in foreach_rr(rrset):
+ print(str(rr))
+ '''
+ for rdata in rrset.get_rdata():
+ rr = isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())
+ rr.add_rdata(rdata)
+ yield rr
+
+def convert_rrset_class(rrset, rrclass):
+ '''Returns a (new) rrset with the data from the given rrset,
+ but of the given class. Useful to convert from NONE and ANY to
+ a real class.
+ Note that the caller should be careful about what it converts;
+ any DNS error that could happen during wire-format reading
+ could technically occur here, and is not caught by this helper.
+ '''
+ new_rrset = isc.dns.RRset(rrset.get_name(), rrclass,
+ rrset.get_type(), rrset.get_ttl())
+ for rdata in rrset.get_rdata():
+ # Rdata class is not modifiable, and must match the rrset's
+ # class, so we need to do some ugly conversion here.
+ # And we cannot use to_text() (since the class may be unknown)
+ wire = rdata.to_wire(bytes())
+ new_rrset.add_rdata(isc.dns.Rdata(rrset.get_type(), rrclass, wire))
+ return new_rrset
+
+def collect_rrsets(collection, rrset):
+ '''
+ Helper function to collect similar rrsets.
+ Collect all rrsets with the same name, class, and type
+ collection is the currently collected list of RRsets,
+ rrset is the RRset to add;
+ if an RRset with the same name, class and type as the
+ given rrset exists in the collection, its rdata fields
+ are added to that RRset. Otherwise, the rrset is added
+ to the given collection.
+ TTL is ignored.
+ This method does not check rdata contents for duplicate
+ values.
+
+ The collection and its rrsets are modified in-place,
+ this method does not return anything.
+ '''
+ found = False
+ for existing_rrset in collection:
+ if existing_rrset.get_name() == rrset.get_name() and\
+ existing_rrset.get_class() == rrset.get_class() and\
+ existing_rrset.get_type() == rrset.get_type():
+ for rdata in rrset.get_rdata():
+ existing_rrset.add_rdata(rdata)
+ found = True
+ if not found:
+ collection.append(rrset)
+
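A quick illustration of the merging behaviour described above; the name and addresses are arbitrary example values:

from isc.dns import Name, RRClass, RRType, RRTTL, RRset, Rdata
from isc.ddns.session import collect_rrsets

collection = []
for address in ('192.0.2.1', '192.0.2.2'):
    rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
               RRTTL(3600))
    rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
    collect_rrsets(collection, rr)

# Both A records share name/class/type, so they end up in a single RRset:
# len(collection) == 1 and collection[0].get_rdata_count() == 2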
+class DDNS_SOA:
+ '''Class to handle the SOA in the DNS update '''
+
+ def __get_serial_internal(self, origin_soa):
+ '''Get serial number from soa'''
+ return Serial(int(origin_soa.get_rdata()[0].to_text().split()[2]))
+
+ def __write_soa_internal(self, origin_soa, soa_num):
+ '''Write back serial number to soa'''
+ new_soa = RRset(origin_soa.get_name(), origin_soa.get_class(),
+ RRType.SOA(), origin_soa.get_ttl())
+ soa_rdata_parts = origin_soa.get_rdata()[0].to_text().split()
+ soa_rdata_parts[2] = str(soa_num.get_value())
+ new_soa.add_rdata(Rdata(origin_soa.get_type(), origin_soa.get_class(),
+ " ".join(soa_rdata_parts)))
+ return new_soa
+
+ def soa_update_check(self, origin_soa, new_soa):
+ '''Check whether the new soa is valid. If its serial number is bigger
+ than that of the old one, it is valid and True is returned; otherwise,
+ False is returned. Make sure the origin_soa and new_soa parameters
+ are not None before invoking soa_update_check.
+ Parameters:
+ origin_soa, old SOA resource record.
+ new_soa, new SOA resource record.
+ Output:
+ if the serial number of new soa is bigger than the old one, return
+ True, otherwise return False.
+ '''
+ old_serial = self.__get_serial_internal(origin_soa)
+ new_serial = self.__get_serial_internal(new_soa)
+ return new_serial > old_serial
+
+ def update_soa(self, origin_soa, inc_number = 1):
+ ''' Update the soa serial number incrementally as described in RFC 2136.
+ Please make sure that origin_soa exists and is not None before invoking
+ this function.
+ Parameters:
+ origin_soa, the soa resource record which will be updated.
+ inc_number, the number which will be added into the serial number of
+ origin_soa, the default value is one.
+ Output:
+ The new origin soa whose serial number has been updated.
+ '''
+ soa_num = self.__get_serial_internal(origin_soa)
+ soa_num = soa_num + inc_number
+ if soa_num.get_value() == 0:
+ soa_num = soa_num + 1
+ return self.__write_soa_internal(origin_soa, soa_num)
+
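The serial handling above uses RFC 1982 serial arithmetic; a small sketch of the wrap-around case, assuming isc.dns.Serial addition wraps modulo 2^32 as the explicit check for 0 in update_soa() implies:

from isc.dns import Serial

serial = Serial(4294967295)      # the maximum 32-bit serial number
serial = serial + 1              # serial arithmetic wraps around to 0
if serial.get_value() == 0:      # the same guard update_soa() applies
    serial = serial + 1          # 0 is skipped, so the new serial is 1
print(serial.get_value())        # prints 1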
+class UpdateSession:
+ '''Protocol handling for a single dynamic update request.
+
+ This class is instantiated with a request message and some other
+ information that will be used for handling the request. Its main
+ method, handle(), will process the request, and normally build
+ a response message according to the result. The application of this
+ class can use the message to send a response to the client.
+
+ '''
+ def __init__(self, req_message, client_addr, zone_config):
+ '''Constructor.
+
+ Parameters:
+ - req_message (isc.dns.Message) The request message. This must be
+ in the PARSE mode, its Opcode must be UPDATE, and must have been
+ TSIG validated if it's TSIG signed.
+ - client_addr (socket address) The address/port of the update client
+ in the form of Python socket address object. This is mainly for
+ logging and access control.
+ - zone_config (ZoneConfig) A tentative container that encapsulates
+ the server's zone configuration. See zone_config.py.
+ - req_data (binary) Wire format data of the request message.
+ It will be used for TSIG verification if necessary.
+
+ '''
+ self.__message = req_message
+ self.__tsig = req_message.get_tsig_record()
+ self.__client_addr = client_addr
+ self.__zone_config = zone_config
+ self.__added_soa = None
+
+ def get_message(self):
+ '''Return the update message.
+
+ After handle() is called, it's generally transformed to the response
+ to be returned to the client. If the request has been dropped,
+ this method returns None. If this method is called before handle()
+ the return value would be identical to the request message passed on
+ construction, although it's of no practical use.
+
+ '''
+ return self.__message
+
+ def handle(self):
+ '''Handle the update request according to RFC2136.
+
+ This method returns a tuple of the following three elements that
+ indicate the result of the request.
+ - Result code of the request processing, which are:
+ UPDATE_SUCCESS Update request granted and succeeded.
+ UPDATE_ERROR Some error happened to be reported in the response.
+ UPDATE_DROP Error happened and no response should be sent.
+ Except in the case of UPDATE_DROP, the UpdateSession object will have
+ created a response that is to be returned to the request client,
+ which can be retrieved by get_message(). If it's UPDATE_DROP,
+ a subsequent call to get_message() returns None.
+ - The name of the updated zone (isc.dns.Name object) in case of
+ UPDATE_SUCCESS; otherwise None.
+ - The RR class of the updated zone (isc.dns.RRClass object) in case
+ of UPDATE_SUCCESS; otherwise None.
+
+ '''
+ try:
+ self._get_update_zone()
+ # Contrary to what RFC2136 specifies, we do ACL checks before
+ # prerequisites. It's now generally considered to be a bad
+ # idea, and can actually do harm such as leaking information.
+ # It makes more sense to prevent any security issues
+ # by performing the ACL check as early as possible.
+ self.__check_update_acl(self.__zname, self.__zclass)
+ self._create_diff()
+ prereq_result = self.__check_prerequisites()
+ if prereq_result != Rcode.NOERROR():
+ self.__make_response(prereq_result)
+ return UPDATE_ERROR, self.__zname, self.__zclass
+ update_result = self.__do_update()
+ if update_result != Rcode.NOERROR():
+ self.__make_response(update_result)
+ return UPDATE_ERROR, self.__zname, self.__zclass
+ self.__make_response(Rcode.NOERROR())
+ return UPDATE_SUCCESS, self.__zname, self.__zclass
+ except UpdateError as e:
+ if not e.nolog:
+ logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_ERROR,
+ ClientFormatter(self.__client_addr, self.__tsig),
+ ZoneFormatter(e.zname, e.zclass), e)
+ # If RCODE is specified, create a corresponding response and return
+ # ERROR; otherwise clear the message and return DROP.
+ if e.rcode is not None:
+ self.__make_response(e.rcode)
+ return UPDATE_ERROR, None, None
+ self.__message = None
+ return UPDATE_DROP, None, None
+ except isc.datasrc.Error as e:
+ logger.error(LIBDDNS_DATASRC_ERROR,
+ ClientFormatter(self.__client_addr, self.__tsig), e)
+ self.__make_response(Rcode.SERVFAIL())
+ return UPDATE_ERROR, None, None
+
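A hedged sketch of the calling convention handle() provides; the request message, client address, ZoneConfig and send function are whatever the caller (for example b10-ddns) already has, and the helper name here is made up for illustration:

from isc.ddns.session import UpdateSession, UPDATE_SUCCESS, UPDATE_DROP

def process_update(req_message, client_addr, zone_config, send_response):
    # req_message: parsed isc.dns.Message in PARSE mode with Opcode UPDATE
    # client_addr: Python socket address tuple of the requesting client
    # zone_config: isc.ddns.zone_config.ZoneConfig for the served zones
    session = UpdateSession(req_message, client_addr, zone_config)
    result, zname, zclass = session.handle()
    if result == UPDATE_DROP:
        return None                       # no response at all is sent
    send_response(session.get_message())  # success or error response
    # on UPDATE_SUCCESS, zname/zclass identify the zone that was changed
    return (zname, zclass) if result == UPDATE_SUCCESS else None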
+ def _get_update_zone(self):
+ '''Parse the zone section and find the zone to be updated.
+
+ If the zone section is valid and the specified zone is found in
+ the configuration, sets private member variables for this session:
+ __datasrc_client: A matching data source that contains the specified
+ zone
+ __zname: The zone name as a Name object
+ __zclass: The zone class as an RRClass object
+ If this method raises an exception, these members are not set.
+
+ Note: This method is protected for ease of use in tests, where
+ methods are tested that need the setup done here without calling
+ the full handle() method.
+ '''
+ # Validation: the zone section must contain exactly one question,
+ # and it must be of type SOA.
+ n_zones = self.__message.get_rr_count(SECTION_ZONE)
+ if n_zones != 1:
+ raise UpdateError('Invalid number of records in zone section: ' +
+ str(n_zones), None, None, Rcode.FORMERR())
+ zrecord = self.__message.get_question()[0]
+ if zrecord.get_type() != RRType.SOA():
+ raise UpdateError('update zone section contains non-SOA',
+ None, None, Rcode.FORMERR())
+
+ # See if we're serving a primary zone specified in the zone section.
+ zname = zrecord.get_name()
+ zclass = zrecord.get_class()
+ zone_type, datasrc_client = self.__zone_config.find_zone(zname, zclass)
+ if zone_type == isc.ddns.zone_config.ZONE_PRIMARY:
+ self.__datasrc_client = datasrc_client
+ self.__zname = zname
+ self.__zclass = zclass
+ return
+ elif zone_type == isc.ddns.zone_config.ZONE_SECONDARY:
+ # We are a secondary server; since we don't yet support update
+ # forwarding, we return 'not implemented'.
+ logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_FORWARD_FAIL,
+ ClientFormatter(self.__client_addr, self.__tsig),
+ ZoneFormatter(zname, zclass))
+ raise UpdateError('forward', zname, zclass, Rcode.NOTIMP(), True)
+ # zone wasn't found
+ logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_NOTAUTH,
+ ClientFormatter(self.__client_addr, self.__tsig),
+ ZoneFormatter(zname, zclass))
+ raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH(), True)
+
+ def _create_diff(self):
+ '''
+ Initializes the internal data structure used for searching current
+ data and for adding and deleting data. This is supposed to be called
+ after ACL checks but before prerequisite checks (since the latter
+ needs the find calls provided by the Diff class).
+ Adds the private member:
+ __diff: A buffer of changes made against the zone by this update
+ This object also contains find() calls, see documentation
+ of the Diff class.
+
+ Note: This method is protected for ease of use in tests, where
+ methods are tested that need the setup done here without calling
+ the full handle() method.
+ '''
+ self.__diff = isc.xfrin.diff.Diff(self.__datasrc_client,
+ self.__zname,
+ journaling=True,
+ single_update_mode=True)
+
+ def __check_update_acl(self, zname, zclass):
+ '''Apply update ACL for the zone to be updated.'''
+ acl = self.__zone_config.get_update_acl(zname, zclass)
+ action = acl.execute(isc.acl.dns.RequestContext(
+ (self.__client_addr[0], self.__client_addr[1]), self.__tsig))
+ if action == REJECT:
+ logger.info(LIBDDNS_UPDATE_DENIED,
+ ClientFormatter(self.__client_addr, self.__tsig),
+ ZoneFormatter(zname, zclass))
+ raise UpdateError('rejected', zname, zclass, Rcode.REFUSED(), True)
+ if action == DROP:
+ logger.info(LIBDDNS_UPDATE_DROPPED,
+ ClientFormatter(self.__client_addr, self.__tsig),
+ ZoneFormatter(zname, zclass))
+ raise UpdateError('dropped', zname, zclass, None, True)
+ logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_APPROVED,
+ ClientFormatter(self.__client_addr, self.__tsig),
+ ZoneFormatter(zname, zclass))
+
+ def __make_response(self, rcode):
+ '''Transform the internal message to the update response.
+
+ According to RFC2136 Section 3.8, the zone section will be cleared
+ as well as other sections. The response Rcode will be set to the
+ given value.
+
+ '''
+ self.__message.make_response()
+ self.__message.clear_section(SECTION_ZONE)
+ self.__message.set_rcode(rcode)
+
+ def __prereq_rrset_exists(self, rrset):
+ '''Check whether an rrset with the given name and type exists. Class,
+ TTL, and Rdata (if any) of the given RRset are ignored.
+ RFC2136 Section 2.4.1.
+ Returns True if the prerequisite is satisfied, False otherwise.
+
+ Note: the only thing used in the call to find() here is the
+ result status. The actual data is immediately dropped. As
+ a future optimization, we may want to add a find() option to
+ only return what the result code would be (and not read/copy
+ any actual data).
+ '''
+ result, _, _ = self.__diff.find(rrset.get_name(), rrset.get_type())
+ return result == ZoneFinder.SUCCESS
+
+ def __prereq_rrset_exists_value(self, rrset):
+ '''Check whether an rrset that matches name, type, and rdata(s) of the
+ given rrset exists.
+ RFC2136 Section 2.4.2
+ Returns True if the prerequisite is satisfied, False otherwise.
+ '''
+ result, found_rrset, _ = self.__diff.find(rrset.get_name(),
+ rrset.get_type())
+ if result == ZoneFinder.SUCCESS and\
+ rrset.get_name() == found_rrset.get_name() and\
+ rrset.get_type() == found_rrset.get_type():
+ # We need to match all actual RRs, unfortunately there is no
+ # direct order-independent comparison for rrsets, so this is
+ # a slightly inefficient way to handle that.
+
+ # shallow copy of the rdata list, so we are sure that this
+ # loop does not mess with actual data.
+ found_rdata = copy.copy(found_rrset.get_rdata())
+ for rdata in rrset.get_rdata():
+ if rdata in found_rdata:
+ found_rdata.remove(rdata)
+ else:
+ return False
+ return len(found_rdata) == 0
+ return False
+
+ def __prereq_rrset_does_not_exist(self, rrset):
+ '''Check whether no rrsets with the same name and type as the given
+ rrset exist.
+ RFC2136 Section 2.4.3.
+ Returns True if the prerequisite is satisfied, False otherwise.
+ '''
+ return not self.__prereq_rrset_exists(rrset)
+
+ def __prereq_name_in_use(self, rrset):
+ '''Check whether the name of the given RRset is in use (i.e. has
+ 1 or more RRs).
+ RFC2136 Section 2.4.4
+ Returns True if the prerequisite is satisfied, False otherwise.
+
+ Note: the only thing used in the call to find_all() here is
+ the result status. The actual data is immediately dropped. As
+ a future optimization, we may want to add a find_all() option
+ to only return what the result code would be (and not read/copy
+ any actual data).
+ '''
+ result, rrsets, flags = self.__diff.find_all(rrset.get_name())
+ if result == ZoneFinder.SUCCESS and\
+ (flags & ZoneFinder.RESULT_WILDCARD == 0):
+ return True
+ return False
+
+ def __prereq_name_not_in_use(self, rrset):
+ '''Check whether the name of the given RRset is not in use (i.e. does
+ not exist at all, or is an empty nonterminal).
+ RFC2136 Section 2.4.5.
+ Returns True if the prerequisite is satisfied, False otherwise.
+ '''
+ return not self.__prereq_name_in_use(rrset)
+
+ def __check_in_zone(self, rrset):
+ '''Returns true if the name of the given rrset is equal to
+ or a subdomain of the zname from the Zone Section.'''
+ relation = rrset.get_name().compare(self.__zname).get_relation()
+ return relation == NameComparisonResult.SUBDOMAIN or\
+ relation == NameComparisonResult.EQUAL
+
+ def __check_prerequisites(self):
+ '''Check the prerequisites section of the UPDATE Message.
+ RFC2136 Section 2.4.
+ Returns a dns Rcode signaling either no error (Rcode.NOERROR())
+ or that one of the prerequisites failed (any other Rcode).
+ '''
+
+ # Temporary array to store exact-match RRsets
+ exact_match_rrsets = []
+
+ for rrset in self.__message.get_section(SECTION_PREREQUISITE):
+ # First check if the name is in the zone
+ if not self.__check_in_zone(rrset):
+ logger.info(LIBDDNS_PREREQ_NOTZONE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.NOTZONE()
+
+ # Algorithm taken from RFC2136 Section 3.2
+ if rrset.get_class() == RRClass.ANY():
+ if rrset.get_ttl().get_value() != 0 or\
+ rrset.get_rdata_count() != 0:
+ logger.info(LIBDDNS_PREREQ_FORMERR_ANY,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ elif rrset.get_type() == RRType.ANY():
+ if not self.__prereq_name_in_use(rrset):
+ rcode = Rcode.NXDOMAIN()
+ logger.info(LIBDDNS_PREREQ_NAME_IN_USE_FAILED,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset), rcode)
+ return rcode
+ else:
+ if not self.__prereq_rrset_exists(rrset):
+ rcode = Rcode.NXRRSET()
+ logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_FAILED,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset), rcode)
+ return rcode
+ elif rrset.get_class() == RRClass.NONE():
+ if rrset.get_ttl().get_value() != 0 or\
+ rrset.get_rdata_count() != 0:
+ logger.info(LIBDDNS_PREREQ_FORMERR_NONE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ elif rrset.get_type() == RRType.ANY():
+ if not self.__prereq_name_not_in_use(rrset):
+ rcode = Rcode.YXDOMAIN()
+ logger.info(LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset), rcode)
+ return rcode
+ else:
+ if not self.__prereq_rrset_does_not_exist(rrset):
+ rcode = Rcode.YXRRSET()
+ logger.info(LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset), rcode)
+ return rcode
+ elif rrset.get_class() == self.__zclass:
+ if rrset.get_ttl().get_value() != 0:
+ logger.info(LIBDDNS_PREREQ_FORMERR,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ else:
+ collect_rrsets(exact_match_rrsets, rrset)
+ else:
+ logger.info(LIBDDNS_PREREQ_FORMERR_CLASS,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+
+ for collected_rrset in exact_match_rrsets:
+ if not self.__prereq_rrset_exists_value(collected_rrset):
+ rcode = Rcode.NXRRSET()
+ logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(collected_rrset), rcode)
+ return rcode
+
+ # All prerequisites are satisfied
+ return Rcode.NOERROR()
+
+ def __set_soa_rrset(self, rrset):
+ '''Sets the given rrset to the member __added_soa (which
+        is used by __do_update for updating the SOA record).'''
+ self.__added_soa = rrset
+
+ def __do_prescan(self):
+ '''Perform the prescan as defined in RFC2136 section 3.4.1.
+        This method has a side effect: if it encounters the addition
+        of a SOA record in the update list, it stores that SOA in
+        self.__added_soa (so the serial can be checked later by
+        __update_soa()).
+ '''
+ for rrset in self.__message.get_section(SECTION_UPDATE):
+ if not self.__check_in_zone(rrset):
+ logger.info(LIBDDNS_UPDATE_NOTZONE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.NOTZONE()
+ if rrset.get_class() == self.__zclass:
+ # In fact, all metatypes are in a specific range,
+ # so one check can test TKEY to ANY
+ # (some value check is needed anyway, since we do
+ # not have defined RRtypes for MAILA and MAILB)
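+                # (For reference, the metatype codes are: TKEY=249,
+                # TSIG=250, IXFR=251, AXFR=252, MAILB=253, MAILA=254,
+                # ANY=255.)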
+ if rrset.get_type().get_code() >= 249:
+ logger.info(LIBDDNS_UPDATE_ADD_BAD_TYPE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_type() == RRType.SOA():
+                    # In case there are somehow multiple SOA records in
+                    # the update, just take the last one
+ for rr in foreach_rr(rrset):
+ self.__set_soa_rrset(rr)
+ elif rrset.get_class() == RRClass.ANY():
+ if rrset.get_ttl().get_value() != 0:
+ logger.info(LIBDDNS_UPDATE_DELETE_NONZERO_TTL,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_rdata_count() > 0:
+ logger.info(LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_type().get_code() >= 249 and\
+ rrset.get_type().get_code() <= 254:
+ logger.info(LIBDDNS_UPDATE_DELETE_BAD_TYPE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ elif rrset.get_class() == RRClass.NONE():
+ if rrset.get_ttl().get_value() != 0:
+ logger.info(LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_type().get_code() >= 249:
+ logger.info(LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ else:
+ logger.info(LIBDDNS_UPDATE_BAD_CLASS,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ return Rcode.NOERROR()
+
+ def __do_update_add_single_rr(self, rr, existing_rrset):
+ '''Helper for __do_update_add_rrs_to_rrset: only add the
+ rr if it is not present yet
+ (note that rr here should already be a single-rr rrset)
+ '''
+ if existing_rrset is None:
+ self.__diff.add_data(rr)
+ else:
+ rr_rdata = rr.get_rdata()[0]
+ if not rr_rdata in existing_rrset.get_rdata():
+ self.__diff.add_data(rr)
+
+ def __do_update_add_rrs_to_rrset(self, rrset):
+ '''Add the rrs from the given rrset to the internal diff.
+ There is handling for a number of special cases mentioned
+ in RFC2136;
+ - If the addition is a CNAME, but existing data at its
+ name is not, the addition is ignored, and vice versa.
+ - If it is a CNAME, and existing data is too, it is
+ replaced (existing data is deleted)
+ An additional restriction is that SOA data is ignored as
+ well (it is handled separately by the __do_update method).
+
+        Note that in the (near) future, this method may get
+        additional special-case processing.
+ '''
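+        # Illustrative examples of the special cases (assumed data):
+        # - adding 'foo IN A 192.0.2.1' while 'foo IN CNAME bar' exists
+        #   is ignored
+        # - adding 'foo IN CNAME baz' while 'foo IN CNAME bar' exists
+        #   replaces the existing CNAME
+        # - adding 'foo IN CNAME baz' while only 'foo IN A 192.0.2.1'
+        #   exists is ignored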
+ # For a number of cases, we may need to remove data in the zone
+        # (note: SOA is handled separately by __do_update, so it is
+        # explicitly ignored here)
+ if rrset.get_type() == RRType.SOA():
+ return
+ result, orig_rrset, _ = self.__diff.find(rrset.get_name(),
+ rrset.get_type())
+ if result == ZoneFinder.CNAME:
+ # Ignore non-cname rrs that try to update CNAME records
+ # (if rrset itself is a CNAME, the finder result would be
+ # SUCCESS, see next case)
+ return
+ elif result == ZoneFinder.SUCCESS:
+            # Data of the same type already exists; if that type is
+            # CNAME, the existing CNAME is replaced by the update
+ if rrset.get_type() == RRType.CNAME():
+ # Remove original CNAME record (the new one
+ # is added below)
+ self.__diff.delete_data(orig_rrset)
+ # We do not have WKS support at this time, but if there
+ # are special Update equality rules such as for WKS, and
+ # we do have support for the type, this is where the check
+ # (and potential delete) would go.
+ elif result == ZoneFinder.NXRRSET:
+ # There is data present, but not for this type.
+ # If this type is CNAME, ignore the update
+ if rrset.get_type() == RRType.CNAME():
+ return
+ for rr in foreach_rr(rrset):
+ self.__do_update_add_single_rr(rr, orig_rrset)
+
+ def __do_update_delete_rrset(self, rrset):
+ '''Deletes the rrset with the name and type of the given
+ rrset from the zone data (by putting all existing data
+ in the internal diff as delete statements).
+ Special cases: if the delete statement is for the
+ zone's apex, and the type is either SOA or NS, it
+ is ignored.'''
+ # find the rrset with local updates
+ result, to_delete, _ = self.__diff.find_updated(rrset.get_name(),
+ rrset.get_type())
+ if result == ZoneFinder.SUCCESS:
+ if to_delete.get_name() == self.__zname and\
+ (to_delete.get_type() == RRType.SOA() or\
+ to_delete.get_type() == RRType.NS()):
+ # ignore
+ return
+ for rr in foreach_rr(to_delete):
+ self.__diff.delete_data(rr)
+
+ def __ns_deleter_helper(self, rrset):
+ '''Special case helper for deleting NS resource records
+ at the zone apex. In that scenario, the last NS record
+ may never be removed (and any action that would do so
+ should be ignored).
+ '''
+ # Find the current NS rrset, including local additions and deletions
+ result, orig_rrset, _ = self.__diff.find_updated(rrset.get_name(),
+ rrset.get_type())
+
+ # Even a real rrset comparison wouldn't help here...
+ # The goal is to make sure that after deletion of the
+ # given rrset, at least 1 NS record is left (at the apex).
+ # So we make a (shallow) copy of the existing rrset,
+ # and for each rdata in the to_delete set, we check if it wouldn't
+ # delete the last one. If it would, that specific one is ignored.
+ # If it would not, the rdata is removed from the temporary list
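+        # (Illustrative: if the apex NS set is {ns1, ns2} and the request
+        # deletes both, one record is deleted, but the deletion that would
+        # remove the final remaining record is skipped, so one NS stays.)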
+ orig_rrset_rdata = copy.copy(orig_rrset.get_rdata())
+ for rdata in rrset.get_rdata():
+ if len(orig_rrset_rdata) == 1 and rdata == orig_rrset_rdata[0]:
+ # ignore
+ continue
+ else:
+ # create an individual RRset for deletion
+ to_delete = isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())
+ to_delete.add_rdata(rdata)
+ orig_rrset_rdata.remove(rdata)
+ self.__diff.delete_data(to_delete)
+
+ def __do_update_delete_name(self, rrset):
+ '''Delete all data at the name of the given rrset,
+ by adding all data found by find_all as delete statements
+ to the internal diff.
+ Special case: if the name is the zone's apex, SOA and
+ NS records are kept.
+ '''
+ # Find everything with the name, including local additions
+ result, rrsets, flags = self.__diff.find_all_updated(rrset.get_name())
+ if result == ZoneFinder.SUCCESS and\
+ (flags & ZoneFinder.RESULT_WILDCARD == 0):
+ for to_delete in rrsets:
+ # if name == self.__zname and type is soa or ns, don't delete!
+ if to_delete.get_name() == self.__zname and\
+ (to_delete.get_type() == RRType.SOA() or
+ to_delete.get_type() == RRType.NS()):
+ continue
+ else:
+ for rr in foreach_rr(to_delete):
+ self.__diff.delete_data(rr)
+
+ def __do_update_delete_rrs_from_rrset(self, rrset):
+ '''Deletes all resource records in the given rrset from the
+ zone. Resource records that do not exist are ignored.
+        If the rrset is of type SOA, it is ignored.
+ Uses the __ns_deleter_helper if the rrset's name is the
+ zone's apex, and the type is NS.
+ '''
+        # Delete all RRs in the rrset, except if name == self.__zname and
+        # type == SOA, or type == NS and only one NS record would be left
+
+        # The incoming rrset has class NONE (we would not have gotten
+        # here otherwise), but the delete operations need the zone's
+        # class, so convert it now.
+ to_delete = convert_rrset_class(rrset, self.__zclass)
+
+ if rrset.get_name() == self.__zname:
+ if rrset.get_type() == RRType.SOA():
+ # ignore
+ return
+ elif rrset.get_type() == RRType.NS():
+                # At least one NS record must remain at the apex, so
+                # delegate to the helper method
+ self.__ns_deleter_helper(to_delete)
+ return
+ for rr in foreach_rr(to_delete):
+ self.__diff.delete_data(rr)
+
+ def __update_soa(self):
+ '''Checks the member value __added_soa, and depending on
+ whether it has been set and what its value is, creates
+ a new SOA if necessary.
+ Then removes the original SOA and adds the new one,
+ by adding the needed operations to the internal diff.'''
+        # Get the existing SOA from the data source.
+        # If a new SOA was specified (and passes the serial check), use
+        # that one; otherwise increment the existing serial and use the
+        # resulting SOA.
+ result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA(),
+ ZoneFinder.NO_WILDCARD |
+ ZoneFinder.FIND_GLUE_OK)
+ # We may implement recovering from missing SOA data at some point, but
+ # for now servfail on such a broken state
+ if result != ZoneFinder.SUCCESS:
+ raise UpdateError("Error finding SOA record in datasource.",
+ self.__zname, self.__zclass, Rcode.SERVFAIL())
+ serial_operation = DDNS_SOA()
+ if self.__added_soa is not None and\
+ serial_operation.soa_update_check(old_soa, self.__added_soa):
+ new_soa = self.__added_soa
+ else:
+ # increment goes here
+ new_soa = serial_operation.update_soa(old_soa)
+
+ self.__diff.delete_data(old_soa)
+ self.__diff.add_data(new_soa)
+
+ def __do_update(self):
+ '''Scan, check, and execute the Update section in the
+ DDNS Update message.
+ Returns an Rcode to signal the result (NOERROR upon success,
+ any error result otherwise).
+ '''
+ # prescan
+ prescan_result = self.__do_prescan()
+ if prescan_result != Rcode.NOERROR():
+ return prescan_result
+
+ # update
+ try:
+ # Do special handling for SOA first
+ self.__update_soa()
+
+ # Algorithm from RFC2136 Section 3.4
+ # Note that this works on full rrsets, not individual RRs.
+ # Some checks might be easier with individual RRs, but only if we
+ # would use the ZoneUpdater directly (so we can query the
+            # 'zone-as-it-would-be-so-far'). However, due to the current use
+ # of the Diff class, this is not the case, and therefore it
+ # is easier to work with full rrsets for the most parts
+ # (less lookups needed; conversion to individual rrs is
+ # the same effort whether it is done here or in the several
+ # do_update statements)
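+            # Dispatch per RFC2136 Section 3.4.2, as implemented below:
+            #   zone class           -> add RRs
+            #   class ANY + type ANY -> delete all RRs at the name
+            #   class ANY + other    -> delete the whole RRset
+            #   class NONE           -> delete specific RRs from an RRset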
+ for rrset in self.__message.get_section(SECTION_UPDATE):
+ if rrset.get_class() == self.__zclass:
+ self.__do_update_add_rrs_to_rrset(rrset)
+ elif rrset.get_class() == RRClass.ANY():
+ if rrset.get_type() == RRType.ANY():
+ self.__do_update_delete_name(rrset)
+ else:
+ self.__do_update_delete_rrset(rrset)
+ elif rrset.get_class() == RRClass.NONE():
+ self.__do_update_delete_rrs_from_rrset(rrset)
+
+ self.__diff.commit()
+ return Rcode.NOERROR()
+ except isc.datasrc.Error as dse:
+ logger.info(LIBDDNS_UPDATE_DATASRC_ERROR, dse)
+ return Rcode.SERVFAIL()
+ except Exception as uce:
+ logger.error(LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ uce)
+ return Rcode.SERVFAIL()
diff --git a/src/lib/python/isc/ddns/tests/Makefile.am b/src/lib/python/isc/ddns/tests/Makefile.am
new file mode 100644
index 0000000..4235a2b
--- /dev/null
+++ b/src/lib/python/isc/ddns/tests/Makefile.am
@@ -0,0 +1,28 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = session_tests.py zone_config_tests.py
+EXTRA_DIST = $(PYTESTS)
+CLEANFILES = $(builddir)/rwtest.sqlite3.copied
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER = $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+# B10_FROM_BUILD is necessary to load data source backend from the build tree.
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ TESTDATA_PATH=$(abs_top_srcdir)/src/lib/testutils/testdata \
+ TESTDATA_WRITE_PATH=$(builddir) \
+ B10_FROM_BUILD=$(abs_top_builddir) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
diff --git a/src/lib/python/isc/ddns/tests/session_tests.py b/src/lib/python/isc/ddns/tests/session_tests.py
new file mode 100644
index 0000000..f7c2d3c
--- /dev/null
+++ b/src/lib/python/isc/ddns/tests/session_tests.py
@@ -0,0 +1,1544 @@
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import shutil
+import isc.log
+import unittest
+from isc.dns import *
+from isc.datasrc import DataSourceClient, ZoneFinder
+from isc.ddns.session import *
+from isc.ddns.zone_config import *
+
+# Some common test parameters
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+READ_ZONE_DB_FILE = TESTDATA_PATH + "rwtest.sqlite3" # original, to be copied
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}"
+
+TEST_ZONE_NAME = Name('example.org')
+UPDATE_RRTYPE = RRType.SOA()
+TEST_RRCLASS = RRClass.IN()
+TEST_ZONE_RECORD = Question(TEST_ZONE_NAME, TEST_RRCLASS, UPDATE_RRTYPE)
+TEST_CLIENT6 = ('2001:db8::1', 53, 0, 0)
+TEST_CLIENT4 = ('192.0.2.1', 53)
+# TSIG key for tests when needed. The key name is TEST_ZONE_NAME.
+TEST_TSIG_KEY = TSIGKey("example.org:SFuWd/q99SzF8Yzd1QbB9g==")
+
+def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[],
+ updates=[], tsig_key=None):
+ msg = Message(Message.RENDER)
+    msg.set_qid(5353) # arbitrarily chosen
+ msg.set_opcode(Opcode.UPDATE())
+ msg.set_rcode(Rcode.NOERROR())
+ for z in zones:
+ msg.add_question(z)
+ for p in prerequisites:
+ msg.add_rrset(SECTION_PREREQUISITE, p)
+ for u in updates:
+ msg.add_rrset(SECTION_UPDATE, u)
+
+ renderer = MessageRenderer()
+ if tsig_key is not None:
+ msg.to_wire(renderer, TSIGContext(tsig_key))
+ else:
+ msg.to_wire(renderer)
+
+ # re-read the created data in the parse mode
+ msg.clear(Message.PARSE)
+ msg.from_wire(renderer.get_data(), Message.PRESERVE_ORDER)
+
+ return msg
+
+def add_rdata(rrset, rdata):
+ '''
+ Helper function for easily adding Rdata fields to RRsets.
+ This function assumes the given rdata is of type string or bytes,
+ and corresponds to the given rrset
+ '''
+ rrset.add_rdata(isc.dns.Rdata(rrset.get_type(),
+ rrset.get_class(),
+ rdata))
+
+def create_rrset(name, rrclass, rrtype, ttl, rdatas = []):
+ '''
+    Helper function to easily create RRsets; auto-converts
+    name, rrclass, rrtype, and ttl (if possible, through their
+    respective constructors).
+ rdatas is a list of rr data strings, or bytestrings, which
+ should match the RRType of the rrset to create
+ '''
+ if type(name) != Name:
+ name = Name(name)
+ if type(rrclass) != RRClass:
+ rrclass = RRClass(rrclass)
+ if type(rrtype) != RRType:
+ rrtype = RRType(rrtype)
+ if type(ttl) != RRTTL:
+ ttl = RRTTL(ttl)
+ rrset = isc.dns.RRset(name, rrclass, rrtype, ttl)
+ for rdata in rdatas:
+ add_rdata(rrset, rdata)
+ return rrset
+
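+# Example call (illustrative, matching usage elsewhere in these tests):
+#   a_rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(),
+#                          3600, ["192.0.2.1"])
+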
+class SessionModuleTests(unittest.TestCase):
+ '''Tests for module-level functions in the session.py module'''
+
+ def test_foreach_rr_in_rrset(self):
+ rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600, [ "192.0.2.1" ])
+
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n"], l)
+
+ add_rdata(rrset, "192.0.2.2")
+ add_rdata(rrset, "192.0.2.3")
+
+ # but through the generator, there should be several 1-line entries
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n",
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ "www.example.org. 3600 IN A 192.0.2.3\n",
+ ], l)
+
+ def test_convert_rrset_class(self):
+ # Converting an RRSET to a different class should work
+ # if the rdata types can be converted
+ rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ 3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
+
+ rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ str(rrset2))
+
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
+ self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 CLASS254 " +
+ "A \\# 4 c0000202\n",
+ str(rrset3))
+
+ # depending on what type of bad data is given, a number
+ # of different exceptions could be raised (TODO: i recall
+ # there was a ticket about making a better hierarchy for
+ # dns/parsing related exceptions)
+ self.assertRaises(InvalidRdataLength, convert_rrset_class,
+ rrset, RRClass.CH())
+ add_rdata(rrset, b'\xc0\x00')
+ self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
+ rrset, RRClass.IN())
+
+ def test_collect_rrsets(self):
+ '''
+ Tests the 'rrset collector' method, which collects rrsets
+ with the same name and type
+ '''
+ collected = []
+
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ]))
+ # Same name and class, different type
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "two" ]))
+ # Same class and type as an existing one, different name
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.3" ]))
+ # Same name and type as an existing one, different class
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "two" ]))
+
+ strings = [ rrset.to_text() for rrset in collected ]
+    # note: '+' joins lines within one collected rrset, ',' separates rrsets
+ expected = ['a.example.org. 0 IN A 192.0.2.1\n' +
+ 'a.example.org. 0 IN A 192.0.2.2\n',
+ 'a.example.org. 0 IN TXT "one"\n' +
+ 'a.example.org. 0 IN TXT "two"\n',
+ 'b.example.org. 0 IN A 192.0.2.3\n' +
+ 'b.example.org. 0 IN A 192.0.2.4\n',
+ 'a.example.org. 0 CH TXT "one"\n' +
+ 'a.example.org. 0 CH TXT "two"\n']
+
+ self.assertEqual(expected, strings)
+
+class SessionTestBase(unittest.TestCase):
+    '''Base class for all session related tests.
+
+ It just initializes common test parameters in its setUp() and defines
+ some common utility method(s).
+
+ '''
+ def setUp(self):
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self._datasrc_client = DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self._update_msg = create_update_msg()
+ self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "ACCEPT"}])}
+ self._session = UpdateSession(self._update_msg, TEST_CLIENT4,
+ ZoneConfig(set(), TEST_RRCLASS,
+ self._datasrc_client,
+ self._acl_map))
+ self._session._get_update_zone()
+ self._session._create_diff()
+
+ def tearDown(self):
+ # With the Updater created in _get_update_zone, and tests
+ # doing all kinds of crazy stuff, one might get database locked
+        # errors if it doesn't clean up explicitly after each test
+ self._session = None
+
+ def check_response(self, msg, expected_rcode):
+        '''Perform common checks on the update response message.'''
+ self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_QR))
+        # note: we convert the opcode to text so failures are more helpful.
+ self.assertEqual(Opcode.UPDATE().to_text(), msg.get_opcode().to_text())
+ self.assertEqual(expected_rcode.to_text(), msg.get_rcode().to_text())
+ # All sections should be cleared
+ self.assertEqual(0, msg.get_rr_count(SECTION_ZONE))
+ self.assertEqual(0, msg.get_rr_count(SECTION_PREREQUISITE))
+ self.assertEqual(0, msg.get_rr_count(SECTION_UPDATE))
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL))
+
+class TestDDNSSOA(unittest.TestCase):
+ '''unittest for the DDNS_SOA'''
+ def test_update_soa(self):
+ '''unittest for update_soa function'''
+ soa_update = DDNS_SOA()
+ soa_rr = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200"])
+ expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. "
+ + "admin.example.org. " +
+ "1234 3600 1800 2419200 7200"])
+ self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
+ expected_soa_rr.get_rdata()[0].to_text())
+ max_serial = 2 ** 32 - 1
+ soa_rdata = "%d %s"%(max_serial,"3600 1800 2419200 7200")
+ soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), 3600,
+ ["ns1.example.org. " + "admin.example.org. " +
+ soa_rdata])
+ expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. "
+ + "admin.example.org. " +
+ "1 3600 1800 2419200 7200"])
+ self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
+ expected_soa_rr.get_rdata()[0].to_text())
+
+ def test_soa_update_check(self):
+ '''unittest for soa_update_check function'''
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200"])
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1234 3600 1800 2419200 7200"])
+ soa_update = DDNS_SOA()
+ # The case of (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) in rfc 1982
+ self.assertTrue(soa_update.soa_update_check(small_soa_rr,
+ large_soa_rr))
+ self.assertFalse(soa_update.soa_update_check(large_soa_rr,
+ small_soa_rr))
+ small_serial = 1235 + 2 ** 31
+ soa_rdata = "%d %s"%(small_serial,"3600 1800 2419200 7200")
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ soa_rdata])
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1234 3600 1800 2419200 7200"])
+ # The case of (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1)) in rfc 1982
+ self.assertTrue(soa_update.soa_update_check(small_soa_rr,
+ large_soa_rr))
+ self.assertFalse(soa_update.soa_update_check(large_soa_rr,
+ small_soa_rr))
+
+class SessionTest(SessionTestBase):
+ '''Basic session tests'''
+
+ def test_handle(self):
+ '''Basic update case'''
+ result, zname, zclass = self._session.handle()
+ self.assertEqual(UPDATE_SUCCESS, result)
+ self.assertEqual(TEST_ZONE_NAME, zname)
+ self.assertEqual(TEST_RRCLASS, zclass)
+
+ # Just checking these are different from the success code.
+ self.assertNotEqual(UPDATE_ERROR, result)
+ self.assertNotEqual(UPDATE_DROP, result)
+
+ def test_broken_request(self):
+ # Zone section is empty
+ msg = create_update_msg(zones=[])
+ session = UpdateSession(msg, TEST_CLIENT6, None)
+ result, zname, zclass = session.handle()
+ self.assertEqual(UPDATE_ERROR, result)
+ self.assertEqual(None, zname)
+ self.assertEqual(None, zclass)
+ self.check_response(session.get_message(), Rcode.FORMERR())
+
+ # Zone section contains multiple records
+ msg = create_update_msg(zones=[TEST_ZONE_RECORD, TEST_ZONE_RECORD])
+ session = UpdateSession(msg, TEST_CLIENT4, None)
+ self.assertEqual(UPDATE_ERROR, session.handle()[0])
+ self.check_response(session.get_message(), Rcode.FORMERR())
+
+ # Zone section's type is not SOA
+ msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.A())])
+ session = UpdateSession(msg, TEST_CLIENT4, None)
+ self.assertEqual(UPDATE_ERROR, session.handle()[0])
+ self.check_response(session.get_message(), Rcode.FORMERR())
+
+ def test_update_secondary(self):
+ # specified zone is configured as a secondary. Since this
+ # implementation doesn't support update forwarding, the result
+ # should be NOTIMP.
+ msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.SOA())])
+ session = UpdateSession(msg, TEST_CLIENT4,
+ ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ TEST_RRCLASS, self._datasrc_client))
+ self.assertEqual(UPDATE_ERROR, session.handle()[0])
+ self.check_response(session.get_message(), Rcode.NOTIMP())
+
+ def check_notauth(self, zname, zclass=TEST_RRCLASS):
+ '''Common test sequence for the 'notauth' test'''
+ msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA())])
+ session = UpdateSession(msg, TEST_CLIENT4,
+ ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ TEST_RRCLASS, self._datasrc_client))
+ self.assertEqual(UPDATE_ERROR, session.handle()[0])
+ self.check_response(session.get_message(), Rcode.NOTAUTH())
+
+ def test_update_notauth(self):
+ '''Update attempt for non authoritative zones'''
+ # zone name doesn't match
+ self.check_notauth(Name('example.com'))
+ # zone name is a subdomain of the actual authoritative zone
+ # (match must be exact)
+ self.check_notauth(Name('sub.example.org'))
+ # zone class doesn't match
+ self.check_notauth(Name('example.org'), RRClass.CH())
+
+ def test_update_datasrc_error(self):
+ # if the data source client raises an exception, it should result in
+ # a SERVFAIL.
+ class BadDataSourceClient:
+ def find_zone(self, name):
+ raise isc.datasrc.Error('faked exception')
+ msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.SOA())])
+ session = UpdateSession(msg, TEST_CLIENT4,
+ ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ TEST_RRCLASS,
+ BadDataSourceClient()))
+ self.assertEqual(UPDATE_ERROR, session.handle()[0])
+ self.check_response(session.get_message(), Rcode.SERVFAIL())
+
+ def test_foreach_rr_in_rrset(self):
+ rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600, [ "192.0.2.1" ])
+
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n"], l)
+
+ add_rdata(rrset, "192.0.2.2")
+ add_rdata(rrset, "192.0.2.3")
+
+ # but through the generator, there should be several 1-line entries
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n",
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ "www.example.org. 3600 IN A 192.0.2.3\n",
+ ], l)
+
+ def test_convert_rrset_class(self):
+ # Converting an RRSET to a different class should work
+ # if the rdata types can be converted
+ rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ 3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
+
+ rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ str(rrset2))
+
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
+ self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 CLASS254 " +
+ "A \\# 4 c0000202\n",
+ str(rrset3))
+
+ # depending on what type of bad data is given, a number
+ # of different exceptions could be raised (TODO: i recall
+ # there was a ticket about making a better hierarchy for
+ # dns/parsing related exceptions)
+ self.assertRaises(InvalidRdataLength, convert_rrset_class,
+ rrset, RRClass.CH())
+ add_rdata(rrset, b'\xc0\x00')
+ self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
+ rrset, RRClass.IN())
+
+ def test_collect_rrsets(self):
+ '''
+ Tests the 'rrset collector' method, which collects rrsets
+ with the same name and type
+ '''
+ collected = []
+
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ]))
+ # Same name and class, different type
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "two" ]))
+ # Same class and type as an existing one, different name
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.3" ]))
+ # Same name and type as an existing one, different class
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "two" ]))
+
+ strings = [ rrset.to_text() for rrset in collected ]
+        # note: '+' joins lines within one collected rrset, ',' separates rrsets
+ expected = ['a.example.org. 0 IN A 192.0.2.1\n' +
+ 'a.example.org. 0 IN A 192.0.2.2\n',
+ 'a.example.org. 0 IN TXT "one"\n' +
+ 'a.example.org. 0 IN TXT "two"\n',
+ 'b.example.org. 0 IN A 192.0.2.3\n' +
+ 'b.example.org. 0 IN A 192.0.2.4\n',
+ 'a.example.org. 0 CH TXT "one"\n' +
+ 'a.example.org. 0 CH TXT "two"\n']
+
+ self.assertEqual(expected, strings)
+
+ def __prereq_helper(self, method, expected, rrset):
+        '''Calls the given method with the given rrset,
+        and compares the return value.
+ Function does not do much but makes the code look nicer'''
+ self.assertEqual(expected, method(rrset))
+
+ def __check_prerequisite_exists_combined(self, method, rrclass, expected):
+ '''shared code for the checks for the very similar (but reversed
+ in behaviour) methods __prereq_rrset_exists and
+ __prereq_rrset_does_not_exist.
+ For rrset_exists, rrclass should be ANY, for rrset_does_not_exist,
+ it should be NONE.
+ '''
+ # Basic existence checks
+ # www.example.org should have an A, but not an MX
+ rrset = create_rrset("www.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, expected, rrset)
+ rrset = create_rrset("www.example.org", rrclass, RRType.MX(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ # example.org should have an MX, but not an A
+ rrset = create_rrset("example.org", rrclass, RRType.MX(), 0)
+ self.__prereq_helper(method, expected, rrset)
+ rrset = create_rrset("example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ # Also check the case where the name does not even exist
+ rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ # Wildcard expansion should not be applied, but literal matches
+ # should work
+ rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, expected, rrset)
+
+ # Likewise, CNAME directly should match, but what it points to should
+ # not
+ rrset = create_rrset("cname.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME(), 0)
+ self.__prereq_helper(method, expected, rrset)
+
+ # And also make sure a delegation (itself) is not treated as existing
+ # data
+ rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+ # But the delegation data itself should match
+ rrset = create_rrset("sub.example.org", rrclass, RRType.NS(), 0)
+ self.__prereq_helper(method, expected, rrset)
+ # As should glue
+ rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A(), 0)
+ self.__prereq_helper(method, expected, rrset)
+
+ def test_check_prerequisite_exists(self):
+ method = self._session._UpdateSession__prereq_rrset_exists
+ self.__check_prerequisite_exists_combined(method,
+ RRClass.ANY(),
+ True)
+
+ def test_check_prerequisite_does_not_exist(self):
+ method = self._session._UpdateSession__prereq_rrset_does_not_exist
+ self.__check_prerequisite_exists_combined(method,
+ RRClass.NONE(),
+ False)
+
+ def test_check_prerequisite_exists_value(self):
+ method = self._session._UpdateSession__prereq_rrset_exists_value
+
+ rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 0)
+ # empty one should not match
+ self.__prereq_helper(method, False, rrset)
+
+ # When the rdata is added, it should match
+ add_rdata(rrset, "192.0.2.1")
+ self.__prereq_helper(method, True, rrset)
+
+ # But adding more should not
+ add_rdata(rrset, "192.0.2.2")
+ self.__prereq_helper(method, False, rrset)
+
+ # Also test one with more than one RR
+ rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
+ self.__prereq_helper(method, False, rrset)
+ add_rdata(rrset, "ns1.example.org.")
+ self.__prereq_helper(method, False, rrset)
+ add_rdata(rrset, "ns2.example.org")
+ self.__prereq_helper(method, False, rrset)
+ add_rdata(rrset, "ns3.example.org.")
+ self.__prereq_helper(method, True, rrset)
+ add_rdata(rrset, "ns4.example.org.")
+ self.__prereq_helper(method, False, rrset)
+
+ # Repeat that, but try a different order of Rdata addition
+ rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
+ self.__prereq_helper(method, False, rrset)
+ add_rdata(rrset, "ns3.example.org.")
+ self.__prereq_helper(method, False, rrset)
+ add_rdata(rrset, "ns2.example.org.")
+ self.__prereq_helper(method, False, rrset)
+ add_rdata(rrset, "ns1.example.org.")
+ self.__prereq_helper(method, True, rrset)
+ add_rdata(rrset, "ns4.example.org.")
+ self.__prereq_helper(method, False, rrset)
+
+ # and test one where the name does not even exist
+ rrset = create_rrset("doesnotexist.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ])
+ self.__prereq_helper(method, False, rrset)
+
+ def __check_prerequisite_name_in_use_combined(self, method, rrclass,
+ expected):
+ '''shared code for the checks for the very similar (but reversed
+ in behaviour) methods __prereq_name_in_use and
+ __prereq_name_not_in_use
+ '''
+ rrset = create_rrset("example.org", rrclass, RRType.ANY(), 0)
+ self.__prereq_helper(method, expected, rrset)
+
+ rrset = create_rrset("www.example.org", rrclass, RRType.ANY(), 0)
+ self.__prereq_helper(method, expected, rrset)
+
+ rrset = create_rrset("doesnotexist.example.org", rrclass,
+ RRType.ANY(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ rrset = create_rrset("belowdelegation.sub.example.org", rrclass,
+ RRType.ANY(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ rrset = create_rrset("foo.wildcard.example.org", rrclass,
+ RRType.ANY(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+
+ # empty nonterminal should not match
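+        # ('nonterminal.example.org' presumably has no RRs of its own in
+        # the test zone, only data below it at
+        # 'empty.nonterminal.example.org', which does match)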
+ rrset = create_rrset("nonterminal.example.org", rrclass,
+ RRType.ANY(), 0)
+ self.__prereq_helper(method, not expected, rrset)
+ rrset = create_rrset("empty.nonterminal.example.org", rrclass,
+ RRType.ANY(), 0)
+ self.__prereq_helper(method, expected, rrset)
+
+ def test_check_prerequisite_name_in_use(self):
+ method = self._session._UpdateSession__prereq_name_in_use
+ self.__check_prerequisite_name_in_use_combined(method,
+ RRClass.ANY(),
+ True)
+
+ def test_check_prerequisite_name_not_in_use(self):
+ method = self._session._UpdateSession__prereq_name_not_in_use
+ self.__check_prerequisite_name_in_use_combined(method,
+ RRClass.NONE(),
+ False)
+
+ def check_prerequisite_result(self, expected, prerequisites):
+ '''Helper method for checking the result of a prerequisite check;
+ creates an update session, and fills it with the list of rrsets
+ from 'prerequisites'. Then checks if __check_prerequisites()
+ returns the Rcode specified in 'expected'.'''
+ msg = create_update_msg([TEST_ZONE_RECORD], prerequisites)
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client,
+ self._acl_map)
+ session = UpdateSession(msg, TEST_CLIENT4, zconfig)
+ session._get_update_zone()
+ session._create_diff()
+ # compare the to_text output of the rcodes (nicer error messages)
+ # This call itself should also be done by handle(),
+ # but just for better failures, it is first called on its own
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__check_prerequisites().to_text())
+ # Now see if handle finds the same result
+ (result, _, _) = session.handle()
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__message.get_rcode().to_text())
+ # And that the result looks right
+ if expected == Rcode.NOERROR():
+ self.assertEqual(UPDATE_SUCCESS, result)
+ else:
+ self.assertEqual(UPDATE_ERROR, result)
+
+ def check_prescan_result(self, expected, updates, expected_soa = None):
+        '''Helper method for checking the result of a prescan;
+ creates an update session, and fills it with the list of rrsets
+ from 'updates'. Then checks if __do_prescan()
+ returns the Rcode specified in 'expected'.'''
+ msg = create_update_msg([TEST_ZONE_RECORD], [], updates)
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client,
+ self._acl_map)
+ session = UpdateSession(msg, TEST_CLIENT4, zconfig)
+ session._get_update_zone()
+ session._create_diff()
+ # compare the to_text output of the rcodes (nicer error messages)
+ # This call itself should also be done by handle(),
+ # but just for better failures, it is first called on its own
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__do_prescan().to_text())
+        # Check the recorded SOA (if none was expected, both sides
+        # render as 'None')
+ self.assertEqual(str(expected_soa),
+ str(session._UpdateSession__added_soa))
+
+ def check_full_handle_result(self, expected, updates, prerequisites=[]):
+ '''Helper method for checking the result of a full handle;
+ creates an update session, and fills it with the list of rrsets
+        from 'updates'. Then checks if handle()
+ results in a response with rcode 'expected'.'''
+ msg = create_update_msg([TEST_ZONE_RECORD], prerequisites, updates)
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client,
+ self._acl_map)
+ session = UpdateSession(msg, TEST_CLIENT4, zconfig)
+
+ # Now see if handle finds the same result
+ (result, _, _) = session.handle()
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__message.get_rcode().to_text())
+ # And that the result looks right
+ if expected == Rcode.NOERROR():
+ self.assertEqual(UPDATE_SUCCESS, result)
+ else:
+ self.assertEqual(UPDATE_ERROR, result)
+
+ def test_check_prerequisites(self):
+ # This test checks if the actual prerequisite-type-specific
+ # methods are called.
+ # It does test all types of prerequisites, but it does not test
+ # every possible result for those types (those are tested above,
+ # in the specific prerequisite type tests)
+
+ # Let's first define a number of prereq's that should succeed
+ rrset_exists_yes = create_rrset("example.org", RRClass.ANY(),
+ RRType.SOA(), 0)
+
+ rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ])
+
+ rrset_does_not_exist_yes = create_rrset("foo.example.org",
+ RRClass.NONE(), RRType.SOA(),
+ 0)
+
+ name_in_use_yes = create_rrset("www.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+
+ name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE(),
+ RRType.ANY(), 0)
+
+ rrset_exists_value_1 = create_rrset("example.org", RRClass.IN(),
+ RRType.NS(), 0,
+ [ "ns1.example.org" ])
+ rrset_exists_value_2 = create_rrset("example.org", RRClass.IN(),
+ RRType.NS(), 0,
+ [ "ns2.example.org" ])
+ rrset_exists_value_3 = create_rrset("example.org", RRClass.IN(),
+ RRType.NS(), 0,
+ [ "ns3.example.org" ])
+
+ # and a number that should not
+ rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY(),
+ RRType.SOA(), 0)
+
+ rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.2" ])
+
+ rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE(),
+ RRType.SOA(), 0)
+
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+
+ name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE(),
+ RRType.ANY(), 0)
+ # check 'no' result codes
+ self.check_prerequisite_result(Rcode.NXRRSET(),
+ [ rrset_exists_no ])
+ self.check_prerequisite_result(Rcode.NXRRSET(),
+ [ rrset_exists_value_no ])
+ self.check_prerequisite_result(Rcode.YXRRSET(),
+ [ rrset_does_not_exist_no ])
+ self.check_prerequisite_result(Rcode.NXDOMAIN(),
+ [ name_in_use_no ])
+ self.check_prerequisite_result(Rcode.YXDOMAIN(),
+ [ name_not_in_use_no ])
+
+ # the 'yes' codes should result in ok
+ # individually
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_yes ] )
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_value_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_does_not_exist_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ name_in_use_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ name_not_in_use_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_value_1,
+ rrset_exists_value_2,
+ rrset_exists_value_3])
+
+ # and together
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_yes,
+ rrset_exists_value_yes,
+ rrset_does_not_exist_yes,
+ name_in_use_yes,
+ name_not_in_use_yes,
+ rrset_exists_value_1,
+ rrset_exists_value_2,
+ rrset_exists_value_3])
+
+ # try out a permutation, note that one rrset is split up,
+ # and the order of the RRs should not matter
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_value_3,
+ rrset_exists_yes,
+ rrset_exists_value_2,
+ name_in_use_yes,
+ rrset_exists_value_1])
+
+ # Should fail on the first error, even if most of the
+ # prerequisites are ok
+ self.check_prerequisite_result(Rcode.NXDOMAIN(),
+ [ rrset_exists_value_3,
+ rrset_exists_yes,
+ rrset_exists_value_2,
+ name_in_use_yes,
+ name_in_use_no,
+ rrset_exists_value_1])
+
+ def test_prerequisite_notzone(self):
+ rrset = create_rrset("some.other.zone.", RRClass.ANY(), RRType.SOA(), 0)
+ self.check_prerequisite_result(Rcode.NOTZONE(), [ rrset ])
+
+ def test_prerequisites_formerr(self):
+ # test for form errors in the prerequisite section
+
+ # Class ANY, non-zero TTL
+ rrset = create_rrset("example.org", RRClass.ANY(), RRType.SOA(), 1)
+ self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+
+ # Class ANY, but with rdata
+ rrset = create_rrset("example.org", RRClass.ANY(), RRType.A(), 0,
+ [ b'\x00\x00\x00\x00' ])
+ self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+
+ # Class NONE, non-zero TTL
+ rrset = create_rrset("example.org", RRClass.NONE(), RRType.SOA(), 1)
+ self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+
+ # Class NONE, but with rdata
+ rrset = create_rrset("example.org", RRClass.NONE(), RRType.A(), 0,
+ [ b'\x00\x00\x00\x00' ])
+ self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+
+ # Matching class and type, but non-zero TTL
+ rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 1,
+ [ "192.0.2.1" ])
+ self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+
+ # Completely different class
+ rrset = create_rrset("example.org", RRClass.CH(), RRType.TXT(), 0,
+ [ "foo" ])
+ self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+
+ def __prereq_helper(self, method, expected, rrset):
+        '''Calls the given method with the given rrset,
+        and compares the return value.
+ Function does not do much but makes the code look nicer'''
+ self.assertEqual(expected, method(rrset))
+
+ def __initialize_update_rrsets(self):
+ '''Prepare a number of RRsets to be used in several update tests
+ The rrsets are stored in self'''
+ orig_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600, [ "192.0.2.1" ])
+ self.orig_a_rrset = orig_a_rrset
+
+ rrset_update_a = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600,
+ [ "192.0.2.2", "192.0.2.3" ])
+ self.rrset_update_a = rrset_update_a
+
+ rrset_update_soa = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200" ])
+ self.rrset_update_soa = rrset_update_soa
+
+ rrset_update_soa_del = create_rrset("example.org", RRClass.NONE(),
+ RRType.SOA(), 0,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200" ])
+ self.rrset_update_soa_del = rrset_update_soa_del
+
+ rrset_update_soa2 = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "4000 3600 1800 2419200 7200" ])
+ self.rrset_update_soa2 = rrset_update_soa2
+
+ rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+ self.rrset_update_del_name = rrset_update_del_name
+
+ rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+ self.rrset_update_del_name_apex = rrset_update_del_name_apex
+
+ rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY(),
+ RRType.A(), 0)
+ self.rrset_update_del_rrset = rrset_update_del_rrset
+
+ rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.MX(), 0)
+ self.rrset_update_del_mx_apex = rrset_update_del_mx_apex
+
+ rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.SOA(), 0)
+ self.rrset_update_del_soa_apex = rrset_update_del_soa_apex
+
+ rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.NS(), 0)
+ self.rrset_update_del_ns_apex = rrset_update_del_ns_apex
+
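+        # The class NONE deletions below carry their rdata in wire
+        # format; e.g. b'\xc0\x00\x02\x02' is 192.0.2.2 and
+        # b'\x03ns1\x07example\x03org\x00' is ns1.example.org.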
+ rrset_update_del_rrset_part = create_rrset("www.example.org",
+ RRClass.NONE(), RRType.A(),
+ 0,
+ [ b'\xc0\x00\x02\x02',
+ b'\xc0\x00\x02\x03' ])
+ self.rrset_update_del_rrset_part = rrset_update_del_rrset_part
+
+ rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE(),
+ RRType.NS(), 0,
+ [ b'\x03ns1\x07example\x03org\x00',
+ b'\x03ns2\x07example\x03org\x00',
+ b'\x03ns3\x07example\x03org\x00' ])
+ self.rrset_update_del_rrset_ns = rrset_update_del_rrset_ns
+
+ rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE(),
+ RRType.MX(), 0,
+ [ b'\x00\x0a\x04mail\x07example\x03org\x00' ])
+ self.rrset_update_del_rrset_mx = rrset_update_del_rrset_mx
+
+ def test_acl_before_prereq(self):
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+
+ # Test a prerequisite that would fail
+ self.check_full_handle_result(Rcode.NXDOMAIN(), [], [ name_in_use_no ])
+
+ # Change ACL so that it would be denied
+ self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "REJECT"}])}
+
+ # The prerequisite should now not be reached; it should fail on the
+ # ACL
+ self.check_full_handle_result(Rcode.REFUSED(), [], [ name_in_use_no ])
+
+ def test_prescan(self):
+ '''Test whether the prescan succeeds on data that is ok, and whether
+        it notices the SOA if present'''
+ # prepare a set of correct update statements
+ self.__initialize_update_rrsets()
+
+ self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+
+ # check if soa is noticed
+ self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa ],
+ self.rrset_update_soa)
+
+        # Other types of successful prechecks
+ self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa2 ],
+ self.rrset_update_soa2)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name_apex ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_mx_apex ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_part ])
+
+ # and check a few permutations of the above
+ # all of them (with one of the soas)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [
+ self.rrset_update_a,
+ self.rrset_update_soa,
+ self.rrset_update_del_name,
+ self.rrset_update_del_name_apex,
+ self.rrset_update_del_rrset,
+ self.rrset_update_del_mx_apex,
+ self.rrset_update_del_rrset_part
+ ],
+ self.rrset_update_soa)
+
+ # Two soas. Should we reject or simply use the last?
+ # (RFC is not really explicit on this, but between the lines I read
+ # use the last)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_soa,
+ self.rrset_update_soa2 ],
+ self.rrset_update_soa2)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_soa2,
+ self.rrset_update_soa ],
+ self.rrset_update_soa)
+
+ self.check_prescan_result(Rcode.NOERROR(),
+ [
+ self.rrset_update_del_mx_apex,
+ self.rrset_update_del_name,
+ self.rrset_update_del_name_apex,
+ self.rrset_update_del_rrset_part,
+ self.rrset_update_a,
+ self.rrset_update_del_rrset,
+ self.rrset_update_soa
+ ],
+ self.rrset_update_soa)
+
+ def test_prescan_failures(self):
+ '''Test whether prescan fails on bad data'''
+ # out of zone data
+ rrset = create_rrset("different.zone", RRClass.ANY(), RRType.TXT(), 0)
+ self.check_prescan_result(Rcode.NOTZONE(), [ rrset ])
+
+ # forbidden type, zone class
+ rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY(), 0,
+ [ b'\x00' ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # non-zero TTL, class ANY
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 1)
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+        # non-empty Rdata, class ANY
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 0,
+ [ "foo" ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # forbidden type, class ANY
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.AXFR(), 0,
+ [ b'\x00' ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # non-zero TTL, class NONE
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.TXT(), 1)
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # forbidden type, class NONE
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.AXFR(), 0,
+ [ b'\x00' ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ def __check_inzone_data(self, expected_result, name, rrtype,
+ expected_rrset = None):
+ '''Does a find on TEST_ZONE for the given rrset's name and type,
+ then checks if the result matches the expected result.
+ If so, and if expected_rrset is given, they are compared as
+ well.'''
+ _, finder = self._datasrc_client.find_zone(TEST_ZONE_NAME)
+ result, found_rrset, _ = finder.find(name, rrtype,
+ finder.NO_WILDCARD |
+ finder.FIND_GLUE_OK)
+ self.assertEqual(expected_result, result)
+ # Sigh. Need rrsets.compare() again.
+ # To be sure, compare name, class, type, and ttl
+ if expected_rrset is not None:
+ self.assertEqual(expected_rrset.get_name(), found_rrset.get_name())
+ self.assertEqual(expected_rrset.get_class(), found_rrset.get_class())
+ self.assertEqual(expected_rrset.get_type(), found_rrset.get_type())
+ self.assertEqual(expected_rrset.get_ttl().to_text(),
+ found_rrset.get_ttl().to_text())
+ expected_rdata =\
+ [ rdata.to_text() for rdata in expected_rrset.get_rdata() ]
+ found_rdata =\
+ [ rdata.to_text() for rdata in found_rrset.get_rdata() ]
+ expected_rdata.sort()
+ found_rdata.sort()
+ self.assertEqual(expected_rdata, found_rdata)
+
+ def test_update_add_delete_rrset(self):
+ '''
+ Tests a sequence of related add and delete updates. Some other
+ cases are tested by later tests.
+ '''
+ self.__initialize_update_rrsets()
+
+ # initially, the www should only contain one rr
+ # (set to self.orig_a_rrset)
+
+ # during this test, we will extend it at some point
+ extended_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600,
+ [ "192.0.2.1",
+ "192.0.2.2",
+ "192.0.2.3" ])
+
+ # Sanity check, make sure original data is really there before updates
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ # Add two rrs
+ self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ extended_a_rrset)
+
+ # Adding the same RRsets should not make a difference.
+ self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ extended_a_rrset)
+
+ # Now delete those two, and we should end up with the original RRset
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_part ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ # 'Deleting' them again should make no difference
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_part ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ # But deleting the entire rrset, independent of its contents, should
+ # work
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ # Check that if we update the SOA, it is updated to our value
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_soa2 ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.SOA(),
+ self.rrset_update_soa2)
+
+ def test_glue_deletions(self):
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("sub.example.org."),
+ RRType.NS())
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("ns.sub.example.org."),
+ RRType.A())
+
+ # See that we can delete glue
+ rrset_delete_glue = create_rrset("ns.sub.example.org.",
+ RRClass.ANY(),
+ RRType.A(),
+ 0)
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ rrset_delete_glue ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("sub.example.org."),
+ RRType.NS())
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("ns.sub.example.org."),
+ RRType.A())
+
+ # Check that we don't accidentally delete a delegation if we
+ # try to delete non-existent glue
+ rrset_delete_nonexistent_glue = create_rrset("foo.sub.example.org.",
+ RRClass.ANY(),
+ RRType.A(),
+ 0)
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ rrset_delete_nonexistent_glue ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("sub.example.org."),
+ RRType.NS())
+
+ def test_update_add_new_data(self):
+ '''
+        This test adds data where none is present
+ '''
+ # Add data at a completely new name
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("new.example.org"),
+ RRType.A())
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.1", "192.0.2.2" ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new.example.org"),
+ RRType.A(),
+ rrset)
+
+ # Also try a name where data is present, but none of this
+ # specific type
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
+ isc.dns.Name("new.example.org"),
+ RRType.TXT())
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.TXT(),
+ 3600, [ "foo" ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new.example.org"),
+ RRType.TXT(),
+ rrset)
+
+ def test_update_add_new_data_interspersed(self):
+ '''
+ This test adds data where none is present, similar to
+ test_update_add_new_data, but this time the second RRset
+ is placed in the update message between the two RRs of the
+ first RRset.
+ '''
+ # Add data at a completely new name
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("new_a.example.org"),
+ RRType.A())
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("new_txt.example.org"),
+ RRType.TXT())
+
+ rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.1" ])
+
+ rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT(),
+ 3600, [ "foo" ])
+
+ rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.2" ])
+
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ rrset1, rrset2, rrset3 ])
+
+ # The update should have merged rrset1 and rrset3
+ rrset_merged = create_rrset("new_a.example.org", TEST_RRCLASS,
+ RRType.A(), 3600,
+ [ "192.0.2.1", "192.0.2.2" ])
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new_a.example.org"),
+ RRType.A(),
+ rrset_merged)
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new_txt.example.org"),
+ RRType.TXT(),
+ rrset2)
+
+ def test_update_delete_name(self):
+ '''
+ Tests whether deletion of every RR for a name works
+ '''
+ self.__initialize_update_rrsets()
+
+ # First check it is there
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ # Delete the entire name
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ # Should still be gone after pointless second delete
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ def test_update_apex_special_cases(self):
+ '''
+ Tests a few special cases when deleting data from the apex
+ '''
+ self.__initialize_update_rrsets()
+
+ # the original SOA
+ orig_soa_rrset = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1234 3600 1800 2419200 7200" ])
+ # At some point, the SOA SERIAL will be auto-incremented
+ incremented_soa_rrset_01 = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1235 3600 1800 2419200 7200" ])
+ incremented_soa_rrset_02 = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1236 3600 1800 2419200 7200" ])
+
+ # We will delete some of the NS records
+ orig_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
+ RRType.NS(), 3600,
+ [ "ns1.example.org.",
+ "ns2.example.org.",
+ "ns3.example.org." ])
+
+ # Sanity check, make sure original data is really there before updates
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ orig_ns_rrset)
+ # We will delete the MX record later in this test, so let's make
+ # sure that it exists (we do not care about its value)
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+
+ # Check that we cannot delete the SOA record by direct deletion
+ # both by name+type and by full rrset
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_soa_apex,
+ self.rrset_update_soa_del ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.SOA(),
+ incremented_soa_rrset_01)
+
+ # If we delete everything at the apex, the SOA and NS rrsets should be
+ # untouched (but serial will be incremented)
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name_apex ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.SOA(),
+ incremented_soa_rrset_02)
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ orig_ns_rrset)
+ # but the MX should be gone
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+
+ # When deleting the NS rrset by name and type only, it should also
+ # be left untouched
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_ns_apex ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ orig_ns_rrset)
+
+ def test_update_apex_special_case_ns_rrset(self):
+ # If we delete the NS at the apex specifically, it should still
+ # keep one record
+ self.__initialize_update_rrsets()
+ # When we are done, we should have a reduced NS rrset
+ short_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
+ RRType.NS(), 3600,
+ [ "ns3.example.org." ])
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_ns ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ short_ns_rrset)
+
+ def test_update_apex_special_case_ns_rrset2(self):
+ # If we add new NS records, then delete all existing ones, it
+ # should not keep any of the original records
+ self.__initialize_update_rrsets()
+ new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS(), 3600,
+ [ "newns1.example.org", "newns2.example.org" ])
+
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ new_ns,
+ self.rrset_update_del_rrset_ns ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ new_ns)
+
+ def test_update_delete_normal_rrset_at_apex(self):
+ '''
+ Tests a number of 'normal rrset' deletes at the apex
+ '''
+
+ # MX should simply be deleted
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_mx ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+
+ def test_update_add_then_delete_rrset(self):
+ # If we add data, then delete the whole rrset, added data should
+ # be gone as well
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_a,
+ self.rrset_update_del_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ def test_update_add_then_delete_name(self):
+ # If we add data, then delete the entire name, added data should
+ # be gone as well
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_a,
+ self.rrset_update_del_name ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ def test_update_delete_then_add_rrset(self):
+ # If we delete an entire rrset, then add something there again,
+ # the addition should be done
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset,
+ self.rrset_update_a ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.rrset_update_a)
+
+ def test_update_delete_then_add_name(self):
+ # If we delete an entire name, then add something there again,
+ # the addition should be done
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name,
+ self.rrset_update_a ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.rrset_update_a)
+
+ def test_update_cname_special_cases(self):
+ self.__initialize_update_rrsets()
+
+ # Sanity check
+ orig_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
+ RRType.CNAME(), 3600,
+ [ "www.example.org." ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
+ isc.dns.Name("cname.example.org"),
+ RRType.A(),
+ orig_cname_rrset)
+
+ # If we try to add data where a CNAME is present, it should be ignored
+ rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.1" ])
+
+ self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
+ isc.dns.Name("cname.example.org"),
+ RRType.A(),
+ orig_cname_rrset)
+
+ # But updating the cname itself should work
+ new_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
+ RRType.CNAME(), 3600,
+ [ "mail.example.org." ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
+ isc.dns.Name("cname.example.org"),
+ RRType.A(),
+ new_cname_rrset)
+
+ self.__initialize_update_rrsets()
+
+ # Likewise, adding a cname where other data is
+ # present should do nothing either
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+ new_cname_rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.CNAME(), 3600,
+ [ "mail.example.org." ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ def test_update_bad_class(self):
+ rrset = create_rrset("example.org.", RRClass.CH(), RRType.TXT(), 0,
+ [ "foo" ])
+ self.check_full_handle_result(Rcode.FORMERR(), [ rrset ])
+
+ def test_uncaught_exception(self):
+ def my_exc():
+ raise Exception("foo")
+ self._session._UpdateSession__update_soa = my_exc
+ self.assertEqual(Rcode.SERVFAIL().to_text(),
+ self._session._UpdateSession__do_update().to_text())
+
+class SessionACLTest(SessionTestBase):
+ '''ACL related tests for update session.'''
+ def test_update_acl_check(self):
+ '''Test for various ACL checks.
+
+ Note that accepted cases are covered in the basic tests.
+
+ '''
+ # create a separate session, with default (empty) ACL map.
+ session = UpdateSession(self._update_msg,
+ TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS,
+ self._datasrc_client))
+ # then the request should be rejected.
+ self.assertEqual((UPDATE_ERROR, None, None), session.handle())
+
+ # recreate the request message, and test with an ACL that would result
+ # in 'DROP'. get_message() should return None.
+ msg = create_update_msg()
+ acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "DROP", "from":
+ TEST_CLIENT4[0]}])}
+ session = UpdateSession(msg, TEST_CLIENT4,
+ ZoneConfig([], TEST_RRCLASS,
+ self._datasrc_client, acl_map))
+ self.assertEqual((UPDATE_DROP, None, None), session.handle())
+ self.assertEqual(None, session.get_message())
+
+ def test_update_tsigacl_check(self):
+ '''Test for various ACL checks using TSIG.'''
+ # This ACL will accept requests that come from TEST_CLIENT4 (any port)
+ # *and* are signed with the TSIG key named TEST_ZONE_NAME; all others
+ # will be rejected.
+ acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "ACCEPT",
+ "from": TEST_CLIENT4[0],
+ "key": TEST_ZONE_NAME.to_text()}])}
+
+ # If the message doesn't contain TSIG, it doesn't match the ACCEPT
+ # ACL entry, and the request should be rejected.
+ session = UpdateSession(self._update_msg,
+ TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS,
+ self._datasrc_client,
+ acl_map))
+ self.assertEqual((UPDATE_ERROR, None, None), session.handle())
+ self.check_response(session.get_message(), Rcode.REFUSED())
+
+ # If the message contains TSIG, it should match the ACCEPT
+ # ACL entry, and the request should be granted.
+ session = UpdateSession(create_update_msg(tsig_key=TEST_TSIG_KEY),
+ TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS,
+ self._datasrc_client,
+ acl_map))
+ self.assertEqual((UPDATE_SUCCESS, TEST_ZONE_NAME, TEST_RRCLASS),
+ session.handle())
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
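The add/delete cases above all rely on fixture RRsets (rrset_update_del_rrset, rrset_update_del_rrset_part, rrset_update_del_name) prepared by __initialize_update_rrsets(), which encode deletions the RFC 2136 way: special CLASS/TTL combinations in the update section rather than separate opcodes. A minimal sketch of those encodings, using the create_rrset() helper from this test file; the RDATA values below are illustrative, not the actual fixture contents:

    # Delete one whole RRset (by name and type): CLASS=ANY, TTL=0, no RDATA.
    del_rrset = create_rrset("www.example.org", RRClass.ANY(), RRType.A(), 0)

    # Delete specific RRs out of an RRset: CLASS=NONE, TTL=0, RDATA listed.
    del_rrset_part = create_rrset("www.example.org", RRClass("NONE"),
                                  RRType.A(), 0,
                                  ["192.0.2.2", "192.0.2.3"])

    # Delete every RRset at a name: TYPE=ANY, CLASS=ANY, TTL=0, no RDATA.
    del_name = create_rrset("www.example.org", RRClass.ANY(),
                            RRType("ANY"), 0)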
diff --git a/src/lib/python/isc/ddns/tests/zone_config_tests.py b/src/lib/python/isc/ddns/tests/zone_config_tests.py
new file mode 100644
index 0000000..7facb48
--- /dev/null
+++ b/src/lib/python/isc/ddns/tests/zone_config_tests.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+from isc.dns import *
+from isc.datasrc import DataSourceClient
+from isc.ddns.zone_config import *
+import isc.acl.dns
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
+
+import unittest
+import socket
+
+# Some common test parameters
+TEST_ZONE_NAME = Name('example.org')
+TEST_SECONDARY_ZONE_NAME = Name('example.com')
+TEST_RRCLASS = RRClass.IN()
+TEST_TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+TEST_ACL_CONTEXT = isc.acl.dns.RequestContext(
+ socket.getaddrinfo("192.0.2.1", 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP, socket.AI_NUMERICHOST)[0][4])
+
+class FakeDataSourceClient:
+ '''Faked data source client used in the ZoneConfigTest.
+
+ It emulates isc.datasrc.DataSourceClient, but only has to provide
+ the find_zone() interface (and only the first element of the return
+ value matters). By default it returns 'SUCCESS' (exact match) for
+ any input. It can be dynamically customized via the set_find_result()
+ method.
+
+ '''
+ def __init__(self):
+ self.__find_result = DataSourceClient.SUCCESS
+
+ def find_zone(self, zname):
+ return (self.__find_result, None)
+
+ def set_find_result(self, result):
+ self.__find_result = result
+
+class ZoneConfigTest(unittest.TestCase):
+ '''Some basic tests for the ZoneConfig class.'''
+ def setUp(self):
+ self.__datasrc_client = FakeDataSourceClient()
+ self.zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
+ TEST_RRCLASS, self.__datasrc_client)
+
+ def test_find_zone(self):
+ # Primary zone case: the zone is in the data source, and not in secondaries
+ self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
+ (self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
+
+ # Secondary zone case: zone is in the data source and in secondaries.
+ self.assertEqual((ZONE_SECONDARY, None),
+ (self.zconfig.find_zone(TEST_SECONDARY_ZONE_NAME,
+ TEST_RRCLASS)))
+
+ # 'not found' case: zone not in the data source.
+ self.__datasrc_client.set_find_result(DataSourceClient.NOTFOUND)
+ self.assertEqual((ZONE_NOTFOUND, None),
+ (self.zconfig.find_zone(Name('example'),
+ TEST_RRCLASS)))
+ # same for the partial match
+ self.__datasrc_client.set_find_result(DataSourceClient.PARTIALMATCH)
+ self.assertEqual((ZONE_NOTFOUND, None),
+ (self.zconfig.find_zone(Name('example'),
+ TEST_RRCLASS)))
+ # A bit unusual case: the zone is not in the data source, but is in
+ # secondaries. This is probably a configuration error, but ZoneConfig
+ # doesn't check at this level.
+ self.__datasrc_client.set_find_result(DataSourceClient.NOTFOUND)
+ self.assertEqual((ZONE_NOTFOUND, None),
+ (self.zconfig.find_zone(TEST_ZONE_NAME,
+ TEST_RRCLASS)))
+ # zone class doesn't match (but zone name matches)
+ self.__datasrc_client.set_find_result(DataSourceClient.SUCCESS)
+ zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
+ RRClass.CH(), self.__datasrc_client)
+ self.assertEqual((ZONE_NOTFOUND, None),
+ (zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
+ # similar to the previous case, but also in the secondary list
+ zconfig = ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ RRClass.CH(), self.__datasrc_client)
+ self.assertEqual((ZONE_NOTFOUND, None),
+ (zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
+
+ # Some basic checks varying the secondary list.
+ # An empty secondary list doesn't cause any disruption.
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self.__datasrc_client)
+ self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
+ zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
+ # Add multiple tuples, including a subdomain of the test zone
+ # name, and the same zone name but a different class
+ zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
+ (Name('example'), TEST_RRCLASS),
+ (Name('sub.example.org'), TEST_RRCLASS),
+ (TEST_ZONE_NAME, RRClass.CH())},
+ TEST_RRCLASS, self.__datasrc_client)
+ self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
+ zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
+
+class ACLConfigTest(unittest.TestCase):
+ def setUp(self):
+ self.__datasrc_client = FakeDataSourceClient()
+ self.__zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
+ TEST_RRCLASS, self.__datasrc_client)
+
+ def test_get_update_acl(self):
+ # By default, no ACL is set, and the default ACL is "reject all"
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
+ self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+
+ # Add a map entry that would match the request, and it should now be
+ # accepted.
+ acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "ACCEPT"}])}
+ self.__zconfig.set_update_acl_map(acl_map)
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
+ self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
+
+ # The default 'reject all' ACL still applies to any other zone
+ acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
+ self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+ self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+
+ # Test with a map with a few more ACL entries. Should be nothing
+ # special.
+ acl_map = {(Name('example.com'), TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "REJECT"}]),
+ (TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "ACCEPT"}]),
+ (TEST_ZONE_NAME, RRClass.CH()):
+ REQUEST_LOADER.load([{"action": "DROP"}])}
+ self.__zconfig.set_update_acl_map(acl_map)
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
+ self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
+ acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
+ self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+ self.assertEqual(DROP, acl.execute(TEST_ACL_CONTEXT))
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
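The FakeDataSourceClient above is what keeps these tests database-free: ZoneConfig only ever consults find_zone(), and only the first element of its return value matters. A condensed sketch of that interaction, with names taken from this test file and from zone_config.py below:

    from isc.dns import Name, RRClass
    from isc.datasrc import DataSourceClient
    from isc.ddns.zone_config import ZoneConfig, ZONE_PRIMARY, ZONE_NOTFOUND

    fake_client = FakeDataSourceClient()
    zconfig = ZoneConfig(set(), RRClass.IN(), fake_client)

    # The default stubbed result is SUCCESS, so the zone looks primary.
    assert zconfig.find_zone(Name("example.org"), RRClass.IN()) == \
        (ZONE_PRIMARY, fake_client)

    # Any other stubbed result makes the zone effectively "not ours".
    fake_client.set_find_result(DataSourceClient.NOTFOUND)
    assert zconfig.find_zone(Name("example.org"), RRClass.IN()) == \
        (ZONE_NOTFOUND, None)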
diff --git a/src/lib/python/isc/ddns/zone_config.py b/src/lib/python/isc/ddns/zone_config.py
new file mode 100644
index 0000000..848eac1
--- /dev/null
+++ b/src/lib/python/isc/ddns/zone_config.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from isc.acl.dns import REQUEST_LOADER
+import isc.dns
+from isc.datasrc import DataSourceClient
+
+# Constants representing zone types
+ZONE_NOTFOUND = -1 # Zone isn't found in find_zone()
+ZONE_PRIMARY = 0 # Primary zone
+ZONE_SECONDARY = 1 # Secondary zone
+
+# The default ACL if unspecified on construction of ZoneConfig.
+DEFAULT_ACL = REQUEST_LOADER.load([{"action": "REJECT"}])
+
+class ZoneConfig:
+ '''A temporary helper class to encapsulate zone related configuration.
+
+ Its find_zone method will search the conceptual configuration for a
+ given zone, and return a tuple of the zone type (primary or secondary)
+ and the client object to access the data source storing the zone.
+ It's very likely that details of zone related configurations like this
+ will change in the near future, so the main purpose of this class is to
+ provide an independent interface for the main DDNS session module
+ until the details are fixed.
+
+ '''
+ def __init__(self, secondaries, datasrc_class, datasrc_client, acl_map={}):
+ '''Constructor.
+
+ Parameters:
+ - secondaries: a set of 2-element tuples. Each element is a pair
+ of isc.dns.Name and isc.dns.RRClass, and identifies a single
+ secondary zone.
+ - datasrc_class: isc.dns.RRClass object. Specifies the RR class
+ of datasrc_client.
+ - datasrc_client: isc.datasrc.DataSourceClient object. A data source
+ client for the RR class of datasrc_class. It's expected to contain
+ a zone that is eventually updated in the ddns package.
+ - acl_map: a dictionary that maps a tuple of
+ (isc.dns.Name, isc.dns.RRClass) to an isc.acl.dns.RequestACL
+ object. It defines an ACL to be applied to the zone identified
+ by the tuple. If unspecified, or if the map is empty, the default
+ ACL will be applied to all zones, which is to reject any requests.
+
+ '''
+ self.__secondaries = secondaries
+ self.__datasrc_class = datasrc_class
+ self.__datasrc_client = datasrc_client
+ self.__default_acl = DEFAULT_ACL
+ self.__acl_map = acl_map
+
+ def find_zone(self, zone_name, zone_class):
+ '''Return the type and accessor client object for given zone.'''
+ if self.__datasrc_class == zone_class and \
+ self.__datasrc_client.find_zone(zone_name)[0] == \
+ DataSourceClient.SUCCESS:
+ if (zone_name, zone_class) in self.__secondaries:
+ return ZONE_SECONDARY, None
+ return ZONE_PRIMARY, self.__datasrc_client
+ return ZONE_NOTFOUND, None
+
+ def get_update_acl(self, zone_name, zone_class):
+ '''Return the update ACL for the given zone.
+
+ This method searches the internally stored ACL map to see if
+ there's an ACL to be applied to the given zone. If found, that
+ ACL will be returned; otherwise the default ACL (see the constructor
+ description) will be returned.
+
+ Parameters:
+ zone_name (isc.dns.Name): The zone name.
+ zone_class (isc.dns.RRClass): The zone class.
+ '''
+ acl = self.__acl_map.get((zone_name, zone_class))
+ if acl is not None:
+ return acl
+ return self.__default_acl
+
+ def set_update_acl_map(self, new_map):
+ '''Set a new ACL map.
+
+ This replaces any stored ACL map, either at construction or
+ by a previous call to this method, with the given new one.
+
+ Parameter:
+ new_map: same as the acl_map parameter of the constructor.
+
+ '''
+ self.__acl_map = new_map
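For reference, a sketch of the call pattern the DDNS session code is expected to follow with this class. The sqlite3 path and the client address are illustrative placeholders; the API calls (REQUEST_LOADER.load, RequestContext, acl.execute) mirror the ones used in the tests above:

    import socket
    import isc.acl.dns
    from isc.acl.dns import REQUEST_LOADER
    from isc.datasrc import DataSourceClient
    from isc.dns import Name, RRClass
    from isc.ddns.zone_config import ZoneConfig, ZONE_PRIMARY

    # Illustrative data source; the database path is a placeholder.
    client = DataSourceClient('sqlite3',
                              '{ "database_file": "/tmp/zone.sqlite3" }')
    acl_map = {(Name("example.org"), RRClass.IN()):
               REQUEST_LOADER.load([{"action": "ACCEPT",
                                     "from": "192.0.2.1"}])}
    zconfig = ZoneConfig(set(), RRClass.IN(), client, acl_map)

    zone_type, datasrc_client = zconfig.find_zone(Name("example.org"),
                                                  RRClass.IN())
    if zone_type == ZONE_PRIMARY:
        acl = zconfig.get_update_acl(Name("example.org"), RRClass.IN())
        context = isc.acl.dns.RequestContext(
            socket.getaddrinfo("192.0.2.1", 53, 0, socket.SOCK_DGRAM,
                               socket.IPPROTO_UDP,
                               socket.AI_NUMERICHOST)[0][4])
        action = acl.execute(context)  # one of ACCEPT / REJECT / DROP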
diff --git a/src/lib/python/isc/log/Makefile.am b/src/lib/python/isc/log/Makefile.am
index 5ff2c28..3658c17 100644
--- a/src/lib/python/isc/log/Makefile.am
+++ b/src/lib/python/isc/log/Makefile.am
@@ -13,7 +13,7 @@ log_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
# placed after -Wextra defined in AM_CXXFLAGS
log_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
log_la_LDFLAGS = $(PYTHON_LDFLAGS)
-log_la_LDFLAGS += -module
+log_la_LDFLAGS += -module -avoid-version
log_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
log_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
log_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
@@ -23,15 +23,6 @@ log_la_LIBADD += $(PYTHON_LIB)
# This is not installed, it helps locate the module during tests
EXTRA_DIST = __init__.py
-# We're going to abuse install-data-local for a pre-install check.
-# This is to be considered a short term hack and is expected to be removed
-# in a near future version.
-install-data-local:
- if test -d @pyexecdir@/isc/log; then \
- echo "@pyexecdir@/isc/log is deprecated, and will confuse newer versions. Please (re)move it by hand."; \
- exit 1; \
- fi
-
pytest:
$(SHELL) tests/log_test
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index 17452f0..69e70b7 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -28,7 +28,11 @@
#include <string>
#include <boost/bind.hpp>
+#include <util/python/pycppwrapper_util.h>
+#include <log/log_dbglevels.h>
+
using namespace isc::log;
+using namespace isc::util::python;
using std::string;
using boost::bind;
@@ -299,7 +303,8 @@ public:
extern PyTypeObject logger_type;
int
-Logger_init(LoggerWrapper* self, PyObject* args) {
+Logger_init(PyObject* po_self, PyObject* args, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
const char* name;
if (!PyArg_ParseTuple(args, "s", &name)) {
return (-1);
@@ -319,7 +324,9 @@ Logger_init(LoggerWrapper* self, PyObject* args) {
}
void
-Logger_destroy(LoggerWrapper* const self) {
+Logger_destroy(PyObject* po_self) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
delete self->logger_;
self->logger_ = NULL;
Py_TYPE(self)->tp_free(self);
@@ -347,7 +354,8 @@ severityToText(const Severity& severity) {
}
PyObject*
-Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveSeverity(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
try {
return (Py_BuildValue("s",
severityToText(
@@ -364,7 +372,8 @@ Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
}
PyObject*
-Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveDebugLevel(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
try {
return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
}
@@ -379,7 +388,8 @@ Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
}
PyObject*
-Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+Logger_setSeverity(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
const char* severity;
int dbgLevel = 0;
if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
@@ -421,27 +431,32 @@ Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
}
PyObject*
-Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isInfoEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
}
PyObject*
-Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isWarnEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
}
PyObject*
-Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isErrorEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
}
PyObject*
-Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isFatalEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
}
PyObject*
-Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+Logger_isDebugEnabled(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
int level = MIN_DEBUG_LEVEL;
if (!PyArg_ParseTuple(args, "|i", &level)) {
return (NULL);
@@ -466,53 +481,39 @@ Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
string
objectToStr(PyObject* object, bool convert) {
- PyObject* cleanup(NULL);
+ PyObjectContainer objstr_container;
if (convert) {
- object = cleanup = PyObject_Str(object);
- if (object == NULL) {
+ PyObject* text_obj = PyObject_Str(object);
+ if (text_obj == NULL) {
+ // PyObject_Str could fail for various reasons, including because
+ // the object cannot be converted to a string. We exit with
+ // InternalError to preserve the PyErr set in PyObject_Str.
throw InternalError();
}
- }
- const char* value;
- PyObject* tuple(Py_BuildValue("(O)", object));
- if (tuple == NULL) {
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
- throw InternalError();
+ objstr_container.reset(text_obj);
+ object = objstr_container.get();
}
- if (!PyArg_ParseTuple(tuple, "s", &value)) {
- Py_DECREF(tuple);
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
+ PyObjectContainer tuple_container(Py_BuildValue("(O)", object));
+ const char* value;
+ if (!PyArg_ParseTuple(tuple_container.get(), "s", &value)) {
throw InternalError();
}
- string result(value);
- Py_DECREF(tuple);
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
- return (result);
+ return (string(value));
}
// Generic function to output the logging message. Called by the real functions.
-template<class Function>
+template <class Function>
PyObject*
Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
try {
- Py_ssize_t number(PyObject_Length(args));
+ const Py_ssize_t number(PyObject_Length(args));
if (number < 0) {
return (NULL);
}
// Which argument is the first to format?
- size_t start(1);
- if (dbgLevel) {
- start ++;
- }
-
+ const size_t start = dbgLevel ? 2 : 1;
if (number < start) {
return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
"logging call, at least %zu needed and %zd "
@@ -520,18 +521,10 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
}
// Extract the fixed arguments
- PyObject *midO(PySequence_GetItem(args, start - 1));
- if (midO == NULL) {
- return (NULL);
- }
- string mid(objectToStr(midO, false));
long dbg(0);
if (dbgLevel) {
- PyObject *dbgO(PySequence_GetItem(args, 0));
- if (dbgO == NULL) {
- return (NULL);
- }
- dbg = PyLong_AsLong(dbgO);
+ PyObjectContainer dbg_container(PySequence_GetItem(args, 0));
+ dbg = PyLong_AsLong(dbg_container.get());
if (PyErr_Occurred()) {
return (NULL);
}
@@ -540,16 +533,22 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
// We create the logging message right now. If we fail to convert a
// parameter to string, at least the part that we already did will
// be output
+ PyObjectContainer msgid_container(PySequence_GetItem(args, start - 1));
+ const string mid(objectToStr(msgid_container.get(), false));
Logger::Formatter formatter(function(dbg, mid.c_str()));
// Now process the rest of parameters, convert each to string and put
// into the formatter. It will print itself in the end.
for (size_t i(start); i < number; ++ i) {
- PyObject* param(PySequence_GetItem(args, i));
- if (param == NULL) {
- return (NULL);
+ PyObjectContainer param_container(PySequence_GetItem(args, i));
+ try {
+ formatter = formatter.arg(objectToStr(param_container.get(),
+ true));
+ }
+ catch (...) {
+ formatter.deactivate();
+ throw;
}
- formatter = formatter.arg(objectToStr(param, true));
}
Py_RETURN_NONE;
}
@@ -569,72 +568,74 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
// Now map the functions into the performOutput. I wish C++ could do
// functional programming.
PyObject*
-Logger_debug(LoggerWrapper* self, PyObject* args) {
+Logger_debug(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
args, true));
}
PyObject*
-Logger_info(LoggerWrapper* self, PyObject* args) {
+Logger_info(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
args, false));
}
PyObject*
-Logger_warn(LoggerWrapper* self, PyObject* args) {
+Logger_warn(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
args, false));
}
PyObject*
-Logger_error(LoggerWrapper* self, PyObject* args) {
+Logger_error(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
args, false));
}
PyObject*
-Logger_fatal(LoggerWrapper* self, PyObject* args) {
+Logger_fatal(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
args, false));
}
PyMethodDef loggerMethods[] = {
- { "get_effective_severity",
- reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
- METH_NOARGS, "Returns the effective logging severity as string" },
- { "get_effective_debug_level",
- reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
- METH_NOARGS, "Returns the current debug level." },
- { "set_severity",
- reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+ { "get_effective_severity", Logger_getEffectiveSeverity, METH_NOARGS,
+ "Returns the effective logging severity as string" },
+ { "get_effective_debug_level", Logger_getEffectiveDebugLevel, METH_NOARGS,
+ "Returns the current debug level." },
+ { "set_severity", Logger_setSeverity, METH_VARARGS,
"Sets the severity of a logger. The parameters are severity as a "
"string and, optionally, a debug level (integer in range 0-99). "
"The severity may be NULL, in which case an inherited value is taken."
},
- { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
- METH_VARARGS, "Returns if the logger would log debug message now. "
+ { "is_debug_enabled", Logger_isDebugEnabled, METH_VARARGS,
+ "Returns if the logger would log debug message now. "
"You can provide a desired debug level." },
- { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
- METH_NOARGS, "Returns if the logger would log info message now." },
- { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
- METH_NOARGS, "Returns if the logger would log warn message now." },
- { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
- METH_NOARGS, "Returns if the logger would log error message now." },
- { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
- METH_NOARGS, "Returns if the logger would log fatal message now." },
- { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+ { "is_info_enabled", Logger_isInfoEnabled, METH_NOARGS,
+ "Returns if the logger would log info message now." },
+ { "is_warn_enabled", Logger_isWarnEnabled, METH_NOARGS,
+ "Returns if the logger would log warn message now." },
+ { "is_error_enabled", Logger_isErrorEnabled, METH_NOARGS,
+ "Returns if the logger would log error message now." },
+ { "is_fatal_enabled", Logger_isFatalEnabled, METH_NOARGS,
+ "Returns if the logger would log fatal message now." },
+ { "debug", Logger_debug, METH_VARARGS,
"Logs a debug-severity message. It takes the debug level, message ID "
"and any number of stringifiable arguments to the message." },
- { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+ { "info", Logger_info, METH_VARARGS,
"Logs a info-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
- { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+ { "warn", Logger_warn, METH_VARARGS,
"Logs a warn-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
- { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+ { "error", Logger_error, METH_VARARGS,
"Logs a error-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
- { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+ { "fatal", Logger_fatal, METH_VARARGS,
"Logs a fatal-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
{ NULL, NULL, 0, NULL }
@@ -645,7 +646,7 @@ PyTypeObject logger_type = {
"isc.log.Logger",
sizeof(LoggerWrapper), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(Logger_destroy), // tp_dealloc
+ Logger_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -677,7 +678,7 @@ PyTypeObject logger_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(Logger_init), // tp_init
+ Logger_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
@@ -714,16 +715,52 @@ PyInit_log(void) {
return (NULL);
}
- if (PyType_Ready(&logger_type) < 0) {
- return (NULL);
- }
+ // Finalize logger class and add in the definitions of the standard debug
+ // levels. These can then be referred to in Python through the constants
+ // isc.log.Logger.DBGLVL_XXX.
+ // N.B. These should be kept in sync with the constants defined in
+ // log_dbglevels.h.
+ try {
+ if (PyType_Ready(&logger_type) < 0) {
+ throw InternalError();
+ }
+ void* p = &logger_type;
+ if (PyModule_AddObject(mod, "Logger",
+ static_cast<PyObject*>(p)) < 0) {
+ throw InternalError();
+ }
- if (PyModule_AddObject(mod, "Logger",
- static_cast<PyObject*>(static_cast<void*>(
- &logger_type))) < 0) {
+ installClassVariable(logger_type, "DBGLVL_START_SHUT",
+ Py_BuildValue("I", DBGLVL_START_SHUT));
+ installClassVariable(logger_type, "DBGLVL_COMMAND",
+ Py_BuildValue("I", DBGLVL_COMMAND));
+ installClassVariable(logger_type, "DBGLVL_COMMAND_DATA",
+ Py_BuildValue("I", DBGLVL_COMMAND_DATA));
+ installClassVariable(logger_type, "DBGLVL_TRACE_BASIC",
+ Py_BuildValue("I", DBGLVL_TRACE_BASIC));
+ installClassVariable(logger_type, "DBGLVL_TRACE_BASIC_DATA",
+ Py_BuildValue("I", DBGLVL_TRACE_BASIC_DATA));
+ installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL",
+ Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
+ installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
+ Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+ } catch (const InternalError&) {
+ Py_DECREF(mod);
+ return (NULL);
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Log initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+ Py_DECREF(mod);
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Log initialization");
+ Py_DECREF(mod);
return (NULL);
}
- Py_INCREF(&logger_type);
+ Py_INCREF(&logger_type);
return (mod);
}
diff --git a/src/lib/python/isc/log/tests/.gitignore b/src/lib/python/isc/log/tests/.gitignore
new file mode 100644
index 0000000..b9cf241
--- /dev/null
+++ b/src/lib/python/isc/log/tests/.gitignore
@@ -0,0 +1 @@
+/log_console.py
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 170eee6..ec29b7a 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -17,6 +17,7 @@ check-local:
chmod +x $(abs_builddir)/log_console.py
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -28,6 +29,7 @@ endif
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done ; \
for pytest in $(PYTESTS_GEN) ; do \
@@ -36,5 +38,6 @@ endif
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 4292b6c..1337654 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -17,6 +17,7 @@
import isc.log
import unittest
import json
+import sys
import bind10_config
from isc.config.ccsession import path_search
@@ -89,6 +90,7 @@ class Logger(unittest.TestCase):
def setUp(self):
isc.log.init("root", "DEBUG", 50)
self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+ self.TEST_MSG = isc.log.create_message('TEST_MESSAGE', '%1')
# Checks defaults of the logger
def defaults(self, logger):
@@ -159,5 +161,44 @@ class Logger(unittest.TestCase):
# Bad type
self.assertRaises(TypeError, logger.debug, "42", "hello")
+ def test_dbglevel_constants(self):
+ """
+ Just check a constant to make sure it is defined and is the
+ correct value. (The constant chosen has a non-zero value to
+ ensure that the code has both defined the constant and set its
+ value correctly.)
+ """
+ logger = isc.log.Logger("child")
+ self.assertEqual(logger.DBGLVL_COMMAND, 10)
+
+ def test_param_reference(self):
+ """
+ Check whether passing a parameter to a logger causes a reference leak.
+ """
+ class LogParam:
+ def __str__(self):
+ return 'LogParam'
+ logger = isc.log.Logger("child")
+ param = LogParam()
+ orig_msgrefcnt = sys.getrefcount(param)
+ orig_idrefcnt = sys.getrefcount(self.TEST_MSG)
+ logger.info(self.TEST_MSG, param)
+ self.assertEqual(sys.getrefcount(self.TEST_MSG), orig_idrefcnt)
+ self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+ # intentionally pass an invalid type for debug level. It will
+ # result in TypeError. The passed object still shouldn't leak a
+ # reference.
+ self.assertRaises(TypeError, logger.debug, param, self.TEST_MSG, param)
+ self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+ def test_bad_parameter(self):
+ # a log parameter cannot be converted to a string object.
+ class LogParam:
+ def __str__(self):
+ raise ValueError("LogParam can't be converted to string")
+ logger = isc.log.Logger("child")
+ self.assertRaises(ValueError, logger.info, self.TEST_MSG, LogParam())
+
if __name__ == '__main__':
unittest.main()
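Taken together with the log.cc changes above, the Python side now exposes the standard debug levels as Logger class constants, so callers no longer need to hard-code numeric levels. A small usage sketch, assuming a message ID registered via isc.log.create_message() as in the tests above; the message name and argument here are illustrative:

    import isc.log

    isc.log.init("example-module")
    EXAMPLE_STARTED = isc.log.create_message('EXAMPLE_STARTED',
                                             'started with %1')

    logger = isc.log.Logger("example")
    # Gate potentially expensive argument preparation on the debug level,
    # using the constants exported from log_dbglevels.h.
    if logger.is_debug_enabled(logger.DBGLVL_COMMAND):
        logger.debug(logger.DBGLVL_COMMAND, EXAMPLE_STARTED, "some-argument")
    logger.info(EXAMPLE_STARTED, "some-argument")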
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
index 30f8374..6d23df3 100644
--- a/src/lib/python/isc/log_messages/Makefile.am
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -3,6 +3,7 @@ SUBDIRS = work
EXTRA_DIST = __init__.py
EXTRA_DIST += bind10_messages.py
EXTRA_DIST += cmdctl_messages.py
+EXTRA_DIST += ddns_messages.py
EXTRA_DIST += stats_messages.py
EXTRA_DIST += stats_httpd_messages.py
EXTRA_DIST += xfrin_messages.py
@@ -11,11 +12,15 @@ EXTRA_DIST += zonemgr_messages.py
EXTRA_DIST += cfgmgr_messages.py
EXTRA_DIST += config_messages.py
EXTRA_DIST += notify_out_messages.py
+EXTRA_DIST += libddns_messages.py
EXTRA_DIST += libxfrin_messages.py
+EXTRA_DIST += server_common_messages.py
+EXTRA_DIST += dbutil_messages.py
CLEANFILES = __init__.pyc
CLEANFILES += bind10_messages.pyc
CLEANFILES += cmdctl_messages.pyc
+CLEANFILES += ddns_messages.pyc
CLEANFILES += stats_messages.pyc
CLEANFILES += stats_httpd_messages.pyc
CLEANFILES += xfrin_messages.pyc
@@ -24,7 +29,10 @@ CLEANFILES += zonemgr_messages.pyc
CLEANFILES += cfgmgr_messages.pyc
CLEANFILES += config_messages.pyc
CLEANFILES += notify_out_messages.pyc
+CLEANFILES += libddns_messages.pyc
CLEANFILES += libxfrin_messages.pyc
+CLEANFILES += server_common_messages.pyc
+CLEANFILES += dbutil_messages.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/log_messages/dbutil_messages.py b/src/lib/python/isc/log_messages/dbutil_messages.py
new file mode 100644
index 0000000..c06dfef
--- /dev/null
+++ b/src/lib/python/isc/log_messages/dbutil_messages.py
@@ -0,0 +1 @@
+from work.dbutil_messages import *
diff --git a/src/lib/python/isc/log_messages/ddns_messages.py b/src/lib/python/isc/log_messages/ddns_messages.py
new file mode 100644
index 0000000..38d83bb
--- /dev/null
+++ b/src/lib/python/isc/log_messages/ddns_messages.py
@@ -0,0 +1 @@
+from work.ddns_messages import *
diff --git a/src/lib/python/isc/log_messages/libddns_messages.py b/src/lib/python/isc/log_messages/libddns_messages.py
new file mode 100644
index 0000000..58d886d
--- /dev/null
+++ b/src/lib/python/isc/log_messages/libddns_messages.py
@@ -0,0 +1 @@
+from work.libddns_messages import *
diff --git a/src/lib/python/isc/log_messages/server_common_messages.py b/src/lib/python/isc/log_messages/server_common_messages.py
new file mode 100644
index 0000000..a491071
--- /dev/null
+++ b/src/lib/python/isc/log_messages/server_common_messages.py
@@ -0,0 +1 @@
+from work.server_common_messages import *
diff --git a/src/lib/python/isc/log_messages/work/.gitignore b/src/lib/python/isc/log_messages/work/.gitignore
new file mode 100644
index 0000000..05a7653
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/.gitignore
@@ -0,0 +1,2 @@
+/__init__.py
+/*_messages.py
diff --git a/src/lib/python/isc/log_messages/work/Makefile.am b/src/lib/python/isc/log_messages/work/Makefile.am
index 9bc5e0f..ad5ee0c 100644
--- a/src/lib/python/isc/log_messages/work/Makefile.am
+++ b/src/lib/python/isc/log_messages/work/Makefile.am
@@ -5,7 +5,7 @@ python_PYTHON = __init__.py
pythondir = $(pyexecdir)/isc/log_messages/
-CLEANFILES = __init__.pyc
+CLEANFILES = __init__.pyc __init__.pyo
CLEANDIRS = __pycache__
clean-local:
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 6b91c87..83ac1d0 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -21,6 +21,7 @@ import threading
import time
import errno
from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient
from isc.net import addr
import isc
from isc.log_messages.notify_out_messages import *
@@ -31,9 +32,11 @@ logger = isc.log.Logger("notify_out")
# we can't import we should not start anyway, and logging an error
# is a bad idea since the logging system is most likely not
# initialized yet. see trac ticket #1103
-from pydnspp import *
+from isc.dns import *
ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
+ZONE_XFRIN_FAILED = 'zone_xfrin_failed'
+
_MAX_NOTIFY_NUM = 30
_MAX_NOTIFY_TRY_NUM = 5
_EVENT_NONE = 0
@@ -51,6 +54,24 @@ _BAD_REPLY_PACKET = 5
SOCK_DATA = b's'
+# borrowed from xfrin.py @ #1298. We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text() + '/' + str(zone_class)
+
+class NotifyOutDataSourceError(Exception):
+ """An exception raised when data source error happens within notify out.
+
+ This exception is expected to be caught within the notify_out module.
+
+ """
+ pass
+
class ZoneNotifyInfo:
'''This class keeps track of notify-out information for one zone.'''
@@ -123,16 +144,20 @@ class NotifyOut:
self._nonblock_event = threading.Event()
def _init_notify_out(self, datasrc_file):
- '''Get all the zones name and its notify target's address
+ '''Get all the zones name and its notify target's address.
+
TODO, currently the zones are got by going through the zone
table in database. There should be a better way to get them
and also the setting 'also_notify', and there should be one
- mechanism to cover the changed datasrc.'''
+ mechanism to cover the changed datasrc.
+
+ '''
self._db_file = datasrc_file
for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
zone_id = (zone_name, zone_class)
self._notify_infos[zone_id] = ZoneNotifyInfo(zone_name, zone_class)
- slaves = self._get_notify_slaves_from_ns(zone_name)
+ slaves = self._get_notify_slaves_from_ns(Name(zone_name),
+ RRClass(zone_class))
for item in slaves:
self._notify_infos[zone_id].notify_slaves.append((item, 53))
@@ -141,17 +166,19 @@ class NotifyOut:
the only interface for class NotifyOut which can be called
by other object.
Internally, the function only set the zone's notify-reply
- timeout to now, then notify message will be sent out. '''
+ timeout to now, then notify message will be sent out.
+ Returns False if the zone/class is not known, True if it is
+ (even if there are no slaves)'''
if zone_name[len(zone_name) - 1] != '.':
zone_name += '.'
zone_id = (zone_name, zone_class)
if zone_id not in self._notify_infos:
- return
+ return False
# Has no slave servers, skip it.
if (len(self._notify_infos[zone_id].notify_slaves) <= 0):
- return
+ return True
with self._lock:
if (self.notify_num >= _MAX_NOTIFY_NUM) or (zone_id in self._notifying_zones):
@@ -163,6 +190,7 @@ class NotifyOut:
self._notifying_zones.append(zone_id)
if not self._nonblock_event.isSet():
self._nonblock_event.set()
+ return True
def _dispatcher(self, started_event):
started_event.set() # Let the master know we are alive already
@@ -227,14 +255,16 @@ class NotifyOut:
self._thread.join()
# Clean up
+ self._write_sock.close()
self._write_sock = None
+ self._read_sock.close()
self._read_sock = None
self._thread = None
def _get_rdata_data(self, rr):
return rr[7].strip()
- def _get_notify_slaves_from_ns(self, zone_name):
+ def _get_notify_slaves_from_ns(self, zone_name, zone_class):
'''Get all NS records, then remove the primary master from ns rrset,
then use the name in NS record rdata part to get the a/aaaa records
in the same zone. the targets listed in a/aaaa record rdata are treated
@@ -242,28 +272,56 @@ class NotifyOut:
Note: this is the simplest way to get the address of slaves,
but not correct, it can't handle the delegation slaves, or the CNAME
and DNAME logic.
- TODO. the function should be provided by one library.'''
- ns_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'NS', self._db_file)
- soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
- ns_rr_name = []
- for ns in ns_rrset:
- ns_rr_name.append(self._get_rdata_data(ns))
-
- if len(soa_rrset) > 0:
- sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
- if sname in ns_rr_name:
- ns_rr_name.remove(sname)
-
- addr_list = []
- for rr_name in ns_rr_name:
- a_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'A', self._db_file)
- aaaa_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'AAAA', self._db_file)
- for rr in a_rrset:
- addr_list.append(self._get_rdata_data(rr))
- for rr in aaaa_rrset:
- addr_list.append(self._get_rdata_data(rr))
-
- return addr_list
+ TODO. the function should be provided by one library.
+
+ '''
+ # Prepare data source client. This should eventually be moved to
+ # an earlier stage of initialization and also support multiple
+ # data sources.
+ datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+ try:
+ ds_client = DataSourceClient('sqlite3', datasrc_config)
+ except isc.datasrc.Error as ex:
+ logger.error(NOTIFY_OUT_DATASRC_ACCESS_FAILURE, ex)
+ return []
+
+ result, finder = ds_client.find_zone(zone_name)
+ if result is not DataSourceClient.SUCCESS:
+ logger.error(NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND,
+ format_zone_str(zone_name, zone_class))
+ return []
+
+ result, ns_rrset, _ = finder.find(zone_name, RRType.NS())
+ if result is not finder.SUCCESS or ns_rrset is None:
+ logger.warn(NOTIFY_OUT_ZONE_NO_NS,
+ format_zone_str(zone_name, zone_class))
+ return []
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ if result is not finder.SUCCESS or soa_rrset is None or \
+ soa_rrset.get_rdata_count() != 1:
+ logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
+ format_zone_str(zone_name, zone_class))
+ return [] # broken zone anyway, stop here.
+ soa_mname = Name(soa_rrset.get_rdata()[0].to_text().split(' ')[0])
+
+ addrs = []
+ for ns_rdata in ns_rrset.get_rdata():
+ ns_name = Name(ns_rdata.to_text())
+ if soa_mname == ns_name:
+ continue
+ ns_result, ns_finder = ds_client.find_zone(ns_name)
+ if ns_result is DataSourceClient.SUCCESS or \
+ ns_result is DataSourceClient.PARTIALMATCH:
+ result, rrset, _ = ns_finder.find(ns_name, RRType.A())
+ if result is ns_finder.SUCCESS and rrset is not None:
+ addrs.extend([a.to_text() for a in rrset.get_rdata()])
+
+ result, rrset, _ = ns_finder.find(ns_name, RRType.AAAA())
+ if result is ns_finder.SUCCESS and rrset is not None:
+ addrs.extend([aaaa.to_text()
+ for aaaa in rrset.get_rdata()])
+
+ return addrs
def _prepare_select_info(self):
'''
@@ -404,8 +462,9 @@ class NotifyOut:
self._nonblock_event.set()
def _send_notify_message_udp(self, zone_notify_info, addrinfo):
- msg, qid = self._create_notify_message(zone_notify_info.zone_name,
- zone_notify_info.zone_class)
+ msg, qid = self._create_notify_message(
+ Name(zone_notify_info.zone_name),
+ RRClass(zone_notify_info.zone_class))
render = MessageRenderer()
render.set_length_limit(512)
msg.to_wire(render)
@@ -426,17 +485,6 @@ class NotifyOut:
return True
- def _create_rrset_from_db_record(self, record, zone_class):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
- This function should be updated first. TODO, the function is copied from xfrout, there
- should be library for creating one rrset. '''
- rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
- rdata_ = Rdata(rrtype_, RRClass(zone_class), " ".join(record[sqlite3_ds.RR_RDATA_INDEX:]))
- rrset_ = RRset(Name(record[sqlite3_ds.RR_NAME_INDEX]), RRClass(zone_class), \
- rrtype_, RRTTL( int(record[sqlite3_ds.RR_TTL_INDEX])))
- rrset_.add_rdata(rdata_)
- return rrset_
-
def _create_notify_message(self, zone_name, zone_class):
msg = Message(Message.RENDER)
qid = random.randint(0, 0xFFFF)
@@ -444,14 +492,35 @@ class NotifyOut:
msg.set_opcode(Opcode.NOTIFY())
msg.set_rcode(Rcode.NOERROR())
msg.set_header_flag(Message.HEADERFLAG_AA)
- question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
- msg.add_question(question)
- # Add soa record to answer section
- soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
- rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+ msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
+ zone_class))
return msg, qid
+ def _get_zone_soa(self, zone_name, zone_class):
+ # We create (and soon drop) the data source client here because
+ # clients should be thread specific. We could let the main thread
+ # loop (_dispatcher) create and retain the client in order to avoid
+ # the overhead when we generalize the interface (and we may also
+ # revisit the design of notify_out more substantially anyway).
+ datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+ result, finder = DataSourceClient('sqlite3',
+ datasrc_config).find_zone(zone_name)
+ if result is not DataSourceClient.SUCCESS:
+ raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+ zone_name.to_text() + '/' +
+ zone_class.to_text() + ' not found')
+
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ if result is not finder.SUCCESS or soa_rrset is None or \
+ soa_rrset.get_rdata_count() != 1:
+ raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+ zone_name.to_text() + '/' +
+ zone_class.to_text() +
+ ' is broken: no valid SOA found')
+
+ return soa_rrset
+
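
For reference, a minimal sketch of the data source lookup pattern that
_get_zone_soa() above now uses in place of the old sqlite3_ds access; the
database path and zone name below are illustrative only:

    # Illustrative only: open a per-call sqlite3 data source client,
    # locate the zone, then fetch its SOA RRset (mirrors _get_zone_soa()).
    from isc.datasrc import DataSourceClient
    from isc.dns import Name, RRType

    db_file = '/some/path/zones.sqlite3'        # hypothetical path
    datasrc_config = '{ "database_file": "' + db_file + '"}'
    result, finder = DataSourceClient('sqlite3',
                                      datasrc_config).find_zone(Name('example.com'))
    if result is DataSourceClient.SUCCESS:
        result, soa_rrset, _ = finder.find(Name('example.com'), RRType.SOA())
        if result is finder.SUCCESS and soa_rrset is not None:
            print(soa_rrset.to_text())
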
def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
'''Parse the notify reply message.
rcode will not checked here, If we get the response
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
index 570f51e..3bc0f38 100644
--- a/src/lib/python/isc/notify/notify_out_messages.mes
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -15,6 +15,18 @@
# No namespace declaration - these constants go in the global namespace
# of the notify_out_messages python module.
+% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message. This can be either a
+configuration error or an installation setup failure.
+
+% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found
+notify_out attempted to get slave information for a zone, but the zone
+wasn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies the list of available zones before
+this step, so it indicates either a critical inconsistency in the data
+source or a software bug.
+
% NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3
The notify_out library tried to send a notify message to the given
address, but it appears to be an invalid address. The configuration
@@ -48,6 +60,16 @@ given address, but the reply did not have the QR bit set to one.
Since there was a response, no more notifies will be sent to this
server for this notification event.
+% NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+
% NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded
The maximum number of retries for the notify target has been exceeded.
Either the address of the secondary nameserver is wrong, or it is not
@@ -72,12 +94,11 @@ The notify message to the given address (noted as address#port) has
timed out, and the message will be resent until the max retry limit
is reached.
-% NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1
-There was an uncaught exception in the handling of a notify reply
-message, either in the message parser, or while trying to extract data
-from the parsed message. The error is printed, and notify_out will
-treat the response as a bad message, but this does point to a
-programming error, since all exceptions should have been caught
-explicitly. Please file a bug report. Since there was a response,
-no more notifies will be sent to this server for this notification
-event.
+% NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. A notify message won't
+be sent for such a zone.
+
+% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. A notify message won't be sent for such a zone.
diff --git a/src/lib/python/isc/notify/tests/.gitignore b/src/lib/python/isc/notify/tests/.gitignore
new file mode 100644
index 0000000..69f6d95
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/.gitignore
@@ -0,0 +1 @@
+/notify_out_test
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 00c2eee..3af5991 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -1,12 +1,20 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = notify_out_test.py
EXTRA_DIST = $(PYTESTS)
+EXTRA_DIST += testdata/test.sqlite3 testdata/brokentest.sqlite3
+# The rest of the files are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/example.net
+EXTRA_DIST += testdata/nons.example testdata/nosoa.example
+EXTRA_DIST += testdata/multisoa.example
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the datasrc library path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,5 +28,7 @@ endif
echo Running test: $$pytest ; \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
+ TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 83f6d1a..1b3a4a1 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -19,9 +19,11 @@ import os
import tempfile
import time
import socket
-from isc.datasrc import sqlite3_ds
from isc.notify import notify_out, SOCK_DATA
import isc.log
+from isc.dns import *
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
# our fake socket, where we can read and insert messages
class MockSocket():
@@ -92,10 +94,8 @@ class TestZoneNotifyInfo(unittest.TestCase):
class TestNotifyOut(unittest.TestCase):
def setUp(self):
- self._db_file = tempfile.NamedTemporaryFile(delete=False)
- sqlite3_ds.load(self._db_file.name, 'example.net.', self._example_net_data_reader)
- sqlite3_ds.load(self._db_file.name, 'example.com.', self._example_com_data_reader)
- self._notify = notify_out.NotifyOut(self._db_file.name)
+ self._db_file = TESTDATA_SRCDIR + '/test.sqlite3'
+ self._notify = notify_out.NotifyOut(self._db_file)
self._notify._notify_infos[('example.com.', 'IN')] = MockZoneNotifyInfo('example.com.', 'IN')
self._notify._notify_infos[('example.com.', 'CH')] = MockZoneNotifyInfo('example.com.', 'CH')
self._notify._notify_infos[('example.net.', 'IN')] = MockZoneNotifyInfo('example.net.', 'IN')
@@ -110,46 +110,52 @@ class TestNotifyOut(unittest.TestCase):
com_ch_info = self._notify._notify_infos[('example.com.', 'CH')]
com_ch_info.notify_slaves.append(('1.1.1.1', 5353))
- def tearDown(self):
- self._db_file.close()
- os.unlink(self._db_file.name)
-
def test_send_notify(self):
notify_out._MAX_NOTIFY_NUM = 2
self._notify._nonblock_event.clear()
- self._notify.send_notify('example.net')
+ self.assertTrue(self._notify.send_notify('example.net'))
self.assertTrue(self._notify._nonblock_event.isSet())
self.assertEqual(self._notify.notify_num, 1)
self.assertEqual(self._notify._notifying_zones[0], ('example.net.', 'IN'))
- self._notify.send_notify('example.com')
+ self.assertTrue(self._notify.send_notify('example.com'))
self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(self._notify._notifying_zones[1], ('example.com.', 'IN'))
# notify_num is equal to MAX_NOTIFY_NUM, append it to waiting_zones list.
self._notify._nonblock_event.clear()
- self._notify.send_notify('example.com', 'CH')
+ self.assertTrue(self._notify.send_notify('example.com', 'CH'))
# add waiting zones won't set nonblock_event.
self.assertFalse(self._notify._nonblock_event.isSet())
self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(1, len(self._notify._waiting_zones))
# zone_id is already in notifying_zones list, append it to waiting_zones list.
- self._notify.send_notify('example.net')
+ self.assertTrue(self._notify.send_notify('example.net'))
self.assertEqual(2, len(self._notify._waiting_zones))
self.assertEqual(self._notify._waiting_zones[1], ('example.net.', 'IN'))
# zone_id is already in waiting_zones list, skip it.
- self._notify.send_notify('example.net')
+ self.assertTrue(self._notify.send_notify('example.net'))
self.assertEqual(2, len(self._notify._waiting_zones))
# has no slave masters, skip it.
- self._notify.send_notify('example.org.', 'CH')
+ self.assertTrue(self._notify.send_notify('example.org.', 'CH'))
self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(2, len(self._notify._waiting_zones))
- self._notify.send_notify('example.org.')
+ self.assertTrue(self._notify.send_notify('example.org.'))
+ self.assertEqual(self._notify.notify_num, 2)
+ self.assertEqual(2, len(self._notify._waiting_zones))
+
+ # zone does not exist, should return False, and no change in other
+ # values
+ self.assertFalse(self._notify.send_notify('does.not.exist.'))
+ self.assertEqual(self._notify.notify_num, 2)
+ self.assertEqual(2, len(self._notify._waiting_zones))
+
+ self.assertFalse(self._notify.send_notify('example.net.', 'CH'))
self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(2, len(self._notify._waiting_zones))
@@ -189,6 +195,11 @@ class TestNotifyOut(unittest.TestCase):
# Now make one socket be readable
self._notify._notify_infos[('example.net.', 'IN')].notify_timeout = time.time() + 10
self._notify._notify_infos[('example.com.', 'IN')].notify_timeout = time.time() + 10
+
+ if self._notify._read_sock is not None:
+ self._notify._read_sock.close()
+ if self._notify._write_sock is not None:
+ self._notify._write_sock.close()
self._notify._read_sock, self._notify._write_sock = socket.socketpair()
self._notify._write_sock.send(SOCK_DATA)
replied_zones, timeout_zones = self._notify._wait_for_notify_reply()
@@ -309,39 +320,9 @@ class TestNotifyOut(unittest.TestCase):
self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
self.assertNotEqual(cur_tgt, example_net_info._notify_current)
-
- def _example_net_data_reader(self):
- zone_data = [
- ('example.net.', '1000', 'IN', 'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
- ('example.net.', '1000', 'IN', 'NS', 'a.dns.example.net.'),
- ('example.net.', '1000', 'IN', 'NS', 'b.dns.example.net.'),
- ('example.net.', '1000', 'IN', 'NS', 'c.dns.example.net.'),
- ('a.dns.example.net.', '1000', 'IN', 'A', '1.1.1.1'),
- ('a.dns.example.net.', '1000', 'IN', 'AAAA', '2:2::2:2'),
- ('b.dns.example.net.', '1000', 'IN', 'A', '3.3.3.3'),
- ('b.dns.example.net.', '1000', 'IN', 'AAAA', '4:4::4:4'),
- ('b.dns.example.net.', '1000', 'IN', 'AAAA', '5:5::5:5'),
- ('c.dns.example.net.', '1000', 'IN', 'A', '6.6.6.6'),
- ('c.dns.example.net.', '1000', 'IN', 'A', '7.7.7.7'),
- ('c.dns.example.net.', '1000', 'IN', 'AAAA', '8:8::8:8')]
- for item in zone_data:
- yield item
-
- def _example_com_data_reader(self):
- zone_data = [
- ('example.com.', '1000', 'IN', 'SOA', 'a.dns.example.com. mail.example.com. 1 1 1 1 1'),
- ('example.com.', '1000', 'IN', 'NS', 'a.dns.example.com.'),
- ('example.com.', '1000', 'IN', 'NS', 'b.dns.example.com.'),
- ('example.com.', '1000', 'IN', 'NS', 'c.dns.example.com.'),
- ('a.dns.example.com.', '1000', 'IN', 'A', '1.1.1.1'),
- ('b.dns.example.com.', '1000', 'IN', 'A', '3.3.3.3'),
- ('b.dns.example.com.', '1000', 'IN', 'AAAA', '4:4::4:4'),
- ('b.dns.example.com.', '1000', 'IN', 'AAAA', '5:5::5:5')]
- for item in zone_data:
- yield item
-
def test_get_notify_slaves_from_ns(self):
- records = self._notify._get_notify_slaves_from_ns('example.net.')
+ records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
+ RRClass.IN())
self.assertEqual(6, len(records))
self.assertEqual('8:8::8:8', records[5])
self.assertEqual('7.7.7.7', records[4])
@@ -350,14 +331,32 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual('4:4::4:4', records[1])
self.assertEqual('3.3.3.3', records[0])
- records = self._notify._get_notify_slaves_from_ns('example.com.')
+ records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
+ RRClass.IN())
self.assertEqual(3, len(records))
self.assertEqual('5:5::5:5', records[2])
self.assertEqual('4:4::4:4', records[1])
self.assertEqual('3.3.3.3', records[0])
+ def test_get_notify_slaves_from_ns_unusual(self):
+ self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nons.example'), RRClass.IN()))
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nosoa.example'), RRClass.IN()))
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('multisoa.example'), RRClass.IN()))
+
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nosuchzone.example'), RRClass.IN()))
+
+ # This will cause failure in getting access to the data source.
+ self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('example.com'), RRClass.IN()))
+
def test_init_notify_out(self):
- self._notify._init_notify_out(self._db_file.name)
+ self._notify._init_notify_out(self._db_file)
self.assertListEqual([('3.3.3.3', 53), ('4:4::4:4', 53), ('5:5::5:5', 53)],
self._notify._notify_infos[('example.com.', 'IN')].notify_slaves)
@@ -417,6 +416,5 @@ class TestNotifyOut(unittest.TestCase):
if __name__== "__main__":
isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
-
-
diff --git a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
new file mode 100644
index 0000000..10d64c1
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 differ
diff --git a/src/lib/python/isc/notify/tests/testdata/example.com b/src/lib/python/isc/notify/tests/testdata/example.com
new file mode 100644
index 0000000..5d59819
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.com
@@ -0,0 +1,10 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.com. 1000 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com. 1000 IN NS a.dns.example.com.
+example.com. 1000 IN NS b.dns.example.com.
+example.com. 1000 IN NS c.dns.example.com.
+a.dns.example.com. 1000 IN A 1.1.1.1
+b.dns.example.com. 1000 IN A 3.3.3.3
+b.dns.example.com. 1000 IN AAAA 4:4::4:4
+b.dns.example.com. 1000 IN AAAA 5:5::5:5
diff --git a/src/lib/python/isc/notify/tests/testdata/example.net b/src/lib/python/isc/notify/tests/testdata/example.net
new file mode 100644
index 0000000..001d2d9
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.net
@@ -0,0 +1,14 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.net. 1000 IN SOA a.dns.example.net. mail.example.net. 1 1 1 1 1
+example.net. 1000 IN NS a.dns.example.net.
+example.net. 1000 IN NS b.dns.example.net.
+example.net. 1000 IN NS c.dns.example.net.
+a.dns.example.net. 1000 IN A 1.1.1.1
+a.dns.example.net. 1000 IN AAAA 2:2::2:2
+b.dns.example.net. 1000 IN A 3.3.3.3
+b.dns.example.net. 1000 IN AAAA 4:4::4:4
+b.dns.example.net. 1000 IN AAAA 5:5::5:5
+c.dns.example.net. 1000 IN A 6.6.6.6
+c.dns.example.net. 1000 IN A 7.7.7.7
+c.dns.example.net. 1000 IN AAAA 8:8::8:8
diff --git a/src/lib/python/isc/notify/tests/testdata/multisoa.example b/src/lib/python/isc/notify/tests/testdata/multisoa.example
new file mode 100644
index 0000000..eca2fbd
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/multisoa.example
@@ -0,0 +1,5 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+multisoa.example. 1000 IN SOA a.dns.multisoa.example. mail.multisoa.example. 1 1 1 1 1
+multisoa.example. 1000 IN SOA a.dns.multisoa.example. mail.multisoa.example. 2 2 2 2 2
+multisoa.example. 1000 IN NS a.dns.multisoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/nons.example b/src/lib/python/isc/notify/tests/testdata/nons.example
new file mode 100644
index 0000000..c1fc1b8
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nons.example
@@ -0,0 +1,3 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+nons.example. 1000 IN SOA a.dns.nons.example. mail.nons.example. 1 1 1 1 1
diff --git a/src/lib/python/isc/notify/tests/testdata/nosoa.example b/src/lib/python/isc/notify/tests/testdata/nosoa.example
new file mode 100644
index 0000000..18e87e1
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nosoa.example
@@ -0,0 +1,7 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+;; (SOA has been removed)
+nosoa.example. 1000 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+nosoa.example. 1000 IN NS a.dns.nosoa.example.
+nosoa.example. 1000 IN NS b.dns.nosoa.example.
+nosoa.example. 1000 IN NS c.dns.nosoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 b/src/lib/python/isc/notify/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..d659181
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/python/isc/server_common/Makefile.am b/src/lib/python/isc/server_common/Makefile.am
new file mode 100644
index 0000000..d89df2f
--- /dev/null
+++ b/src/lib/python/isc/server_common/Makefile.am
@@ -0,0 +1,25 @@
+SUBDIRS = tests
+
+python_PYTHON = __init__.py tsig_keyring.py auth_command.py dns_tcp.py
+python_PYTHON += logger.py
+
+pythondir = $(pyexecdir)/isc/server_common
+
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/server_common_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/server_common_messages.py
+
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/server_common_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/server_common_messages.pyc
+
+CLEANDIRS = __pycache__
+
+EXTRA_DIST = server_common_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/server_common_messages.py : server_common_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/server_common_messages.mes
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/server_common/__init__.py b/src/lib/python/isc/server_common/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/server_common/auth_command.py b/src/lib/python/isc/server_common/auth_command.py
new file mode 100644
index 0000000..eb9c892
--- /dev/null
+++ b/src/lib/python/isc/server_common/auth_command.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This module provides utilities to create inter-module commands for Auth.'''
+
+from isc.dns import *
+import isc.log
+from isc.config.ccsession import create_command
+from isc.log_messages.server_common_messages import *
+from isc.server_common.logger import logger
+
+AUTH_MODULE_NAME = 'Auth'
+
+def auth_loadzone_command(module_cc, zone_name, zone_class):
+ '''Create a 'loadzone' command with a given zone for Auth server.
+
+    This function checks the Auth module configuration to see if it
+    serves the given zone via an in-memory data source on top of the
+    SQLite3 data source and, if so, generates an inter-module command
+    for Auth to force it to reload the zone.
+
+ Parameters:
+ module_cc (CCSession): a CC session that can get access to auth module
+ configuration as a remote configuration
+ zone_name (isc.dns.Name): the zone name to be possibly reloaded
+ zone_class (isc.dns.RRClass): the RR class of the zone to be possibly
+ reloaded.
+
+ Return: a CC command message for the reload if the zone is found;
+ otherwise None.
+
+ '''
+ # Note: this function was originally a dedicated subroutine of xfrin,
+ # but was moved here so it can be shared by some other modules
+ # (specifically, by ddns). It's expected that we'll soon fundamentally
+ # revisit the whole data source related configuration, at which point
+ # this function should be substantially modified if not completely
+ # deprecated (which is a more likely scenario). For this reason, the
+ # corresponding tests were still kept in xfrin.
+
+ datasources, is_default =\
+ module_cc.get_remote_config_value(AUTH_MODULE_NAME, "datasources")
+ if is_default:
+ return None
+ for d in datasources:
+ if "type" not in d:
+ continue
+ try:
+ if "class" in d:
+ dclass = RRClass(d["class"])
+ else:
+ dclass = RRClass("IN")
+ except InvalidRRClass as err:
+ logger.info(PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR, err)
+ continue
+
+ if d["type"].lower() == "memory" and dclass == zone_class:
+ for zone in d["zones"]:
+ if "filetype" not in zone:
+ continue
+ if "origin" not in zone:
+ continue
+ try:
+ name = Name(zone["origin"])
+            except (EmptyLabel, TooLongLabel, BadLabelType, BadEscape,
+                    TooLongName, IncompleteName) as err:
+                logger.info(PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR,
+                            err)
+ continue
+
+ if zone["filetype"].lower() == "sqlite3" and name == zone_name:
+ param = {"origin": zone_name.to_text(),
+ "class": zone_class.to_text(),
+ "datasrc": d["type"]}
+ return create_command("loadzone", param)
+ return None
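
As a rough illustration of how a caller (xfrin or ddns, per the note in the
function above) might use this helper; module_cc is assumed to be an
already-established CC session with Auth registered as a remote module, and
the group_sendmsg() dispatch shown here is an assumption for the sketch:

    # Hypothetical caller sketch for auth_loadzone_command(); not part of
    # this module.
    from isc.dns import Name, RRClass
    from isc.server_common.auth_command import (auth_loadzone_command,
                                                AUTH_MODULE_NAME)

    def ask_auth_to_reload(module_cc, zone_name, zone_class=RRClass("IN")):
        cmd = auth_loadzone_command(module_cc, zone_name, zone_class)
        if cmd is not None:
            # Assumed dispatch primitive; the exact send call depends on
            # the caller's CC session object.
            module_cc.group_sendmsg(cmd, AUTH_MODULE_NAME)

    # e.g. ask_auth_to_reload(module_cc, Name('example.org'), RRClass('IN'))
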
diff --git a/src/lib/python/isc/server_common/dns_tcp.py b/src/lib/python/isc/server_common/dns_tcp.py
new file mode 100644
index 0000000..3b78d0d
--- /dev/null
+++ b/src/lib/python/isc/server_common/dns_tcp.py
@@ -0,0 +1,280 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Utility for handling DNS transactions over TCP.
+
+This module defines a few convenient utility classes for handling DNS
+transactions via a TCP socket.
+
+"""
+
+import isc.log
+from isc.server_common.logger import logger
+from isc.log_messages.server_common_messages import *
+from isc.ddns.logger import ClientFormatter
+import errno
+import socket
+import struct
+
+class DNSTCPSendBuffer:
+ '''A composite buffer for a DNS message sent over TCP.
+
+ This class encapsulates binary data supposed to be a complete DNS
+    message, taking into account the 2-byte length field preceding the
+ actual data.
+
+ An object of this class is constructed with a binary object for the
+ DNS message data (in wire-format), conceptually "appended" to the
+ 2-byte length field. The length is automatically calculated and
+ converted to the wire-format data in the network byte order.
+
+ Its get_data() method returns a binary object corresponding to the
+ consecutive region of the conceptual buffer starting from the specified
+ position. The returned region may not necessarily contain all remaining
+ data from the specified position; this class can internally hold multiple
+ separate binary objects to represent the conceptual buffer, and,
+ in that case, get_data() identifies the object that contains the
+ specified position of data, and returns the longest consecutive region
+ from that position. So the caller must call get_data(), incrementing
+ the position as it transmits the data, until it gets None.
+
+ This class is primarily intended to be a private utility for the
+ DNSTCPContext class, but can be used by other general applications
+ that need to send DNS messages over TCP in their own way.
+
+ '''
+ def __init__(self, data):
+        '''Constructor.
+
+ Parameter:
+ data (binary): A binary sequence that is supposed to be a
+ complete DNS message in the wire format. It must not
+ exceed 65535 bytes in length; otherwise ValueError will be
+ raised. This class does not check any further validity on
+ the data as a DNS message.
+
+ '''
+ self.__data_size = len(data)
+ self.__len_size = 2 # fixed length
+ if self.__data_size > 0xffff:
+ raise ValueError('Too large data for DNS/TCP, size: ' +
+ str(self.__data_size))
+ self.__lenbuf = struct.pack('H', socket.htons(self.__data_size))
+ self.__databuf = data
+
+ def get_total_len(self):
+ '''Return the total length of the buffer, including the length field.
+
+ '''
+ return self.__data_size + self.__len_size
+
+ def get_data(self, pos):
+ '''Return a portion of data from a specified position.
+
+ Parameter:
+ pos (int): The position in the TCP DNS message data (including
+ the 2-byte length field) from which the data are to be returned.
+
+ Return:
+ A Python binary object that corresponds to a part of the TCP
+ DNS message data starting at the specified position. It may
+ or may not contain all remaining data from that position.
+            If the given position is beyond the end of the entire data,
+ None will be returned.
+
+ '''
+ if pos >= self.__len_size:
+ pos -= self.__len_size
+ if pos >= self.__data_size:
+ return None
+ return self.__databuf[pos:]
+ return self.__lenbuf[pos:]
+
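
A minimal sketch of the get_data() consumption pattern the class docstring
describes: keep requesting data at the current position and advance by the
number of bytes actually handled, until None is returned.

    # Illustrative only: walk a DNSTCPSendBuffer to completion, pretending
    # every returned chunk is fully "sent".
    buf = DNSTCPSendBuffer(b'\x00' * 12)   # dummy 12-byte "DNS message"
    pos = 0
    while True:
        chunk = buf.get_data(pos)
        if chunk is None:
            break                          # length field + data all consumed
        pos += len(chunk)                  # a real sender adds bytes actually sent
    assert pos == buf.get_total_len()      # 2 (length field) + 12 (data)
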
+class DNSTCPContextError(Exception):
+ '''An exception raised against logic errors in DNSTCPContext.
+
+ This is raised only when the context class is used in an unexpected way,
+ that is for a caller's bug.
+
+ '''
+ pass
+
+class DNSTCPContext:
+ '''Context of a TCP connection used for DNS transactions.
+
+ This class offers the following services:
+ - Handle the initial 2-byte length field internally. The user of
+ this class only has to deal with the bare DNS message (just like
+      the one transmitted over UDP).
+ - Asynchronous I/O. It supports the non blocking operation mode,
+      where method calls never block. The caller is told whether the
+      operation is still ongoing (and it should watch the socket) or has
+      fully completed.
+ - Socket error handling: it internally catches socket related exceptions
+      and handles them in an appropriate way. A fatal error will be reported
+ to the caller in the form of a normal return value. The application
+ of this class can therefore assume it's basically exception free.
+
+ Notes:
+ - the initial implementation only supports non blocking mode, but
+      it's intended to be extended so it can work in both blocking and
+      non-blocking modes as the need arises.
+ - the initial implementation only supports send operations on an
+ already connected socket, but the intent is to extend this class
+ so it can handle receive or connect operations.
+
+ '''
+
+ # Result codes used in send()/send_ready() methods.
+ SEND_DONE = 1
+ SENDING = 2
+ CLOSED = 3
+
+ def __init__(self, sock):
+ '''Constructor.
+
+ Parameter:
+ sock (Python socket): the socket to be used for the transaction.
+ It must represent a TCP socket; otherwise DNSTCPContextError
+ will be raised. It's also expected to be connected, but it's
+            not checked on construction; a subsequent send operation would
+            fail if it isn't.
+
+ '''
+ if sock.proto != socket.IPPROTO_TCP:
+ raise DNSTCPContextError('not a TCP socket, proto: ' +
+ str(sock.proto))
+ sock.setblocking(False)
+ self.__sock = sock
+ self.__send_buffer = None
+ self.__remote_addr = sock.getpeername() # record it for logging
+
+ def send(self, data):
+ '''Send a DNS message.
+
+ In the non blocking mode, it sends as much data as possible via
+ the underlying TCP socket until it would block or all data are sent
+ out, and returns the corresponding result code. This method
+ therefore doesn't block in this mode.
+
+ Note: the initial implementation only works in the non blocking
+ mode.
+
+ This method must not be called once an error is detected and
+ CLOSED is returned or a prior send attempt is ongoing (with
+ the result code of SENDING); otherwise DNSTCPContextError is
+ raised.
+
+ Parameter:
+ data (binary): A binary sequence that is supposed to be a
+ complete DNS message in the wire format. It must meet
+ the assumption that DNSTCPSendBuffer requires.
+
+ Return:
+ An integer constant representing the result:
+ - SEND_DONE All data have been sent out successfully.
+            - SENDING All writable data has been sent out, and a further
+              attempt would block at the moment. The caller is expected
+ to detect it when the underlying socket is writable again
+ and call send_ready() to continue the send.
+ - CLOSED A network error happened before the send operation is
+ completed. The underlying socket has been closed, and this
+ context object will be unusable.
+
+ '''
+ if self.__sock is None:
+ raise DNSTCPContextError('send() called after close')
+ if self.__send_buffer is not None:
+ raise DNSTCPContextError('duplicate send()')
+
+ self.__send_buffer = DNSTCPSendBuffer(data)
+ self.__send_marker = 0
+ return self.__do_send()
+
+ def send_ready(self):
+ '''Resume sending a DNS message.
+
+        This method is expected to be called following a send() call or
+ another send_ready() call that resulted in SENDING, when the caller
+ detects the underlying socket becomes writable. It works as
+ send() except that it continues the send operation from the suspended
+ position of the data at the time of the previous call.
+
+ This method must not be called once an error is detected and
+ CLOSED is returned or a send() method hasn't been called to
+ start the operation; otherwise DNSTCPContextError is raised.
+
+ Return: see send().
+
+ '''
+ if self.__sock is None:
+            raise DNSTCPContextError('send_ready() called after close')
+ if self.__send_buffer is None:
+ raise DNSTCPContextError('send_ready() called before send')
+
+ return self.__do_send()
+
+ def __do_send(self):
+ while True:
+ data = self.__send_buffer.get_data(self.__send_marker)
+ if data is None:
+ # send complete; clear the internal buffer for next possible
+ # send.
+ logger.debug(logger.DBGLVL_TRACE_DETAIL,
+ PYSERVER_COMMON_DNS_TCP_SEND_DONE,
+ ClientFormatter(self.__remote_addr),
+ self.__send_marker)
+ self.__send_buffer = None
+ self.__send_marker = 0
+ return self.SEND_DONE
+ try:
+ cc = self.__sock.send(data)
+ except socket.error as ex:
+ total_len = self.__send_buffer.get_total_len()
+ if ex.errno == errno.EAGAIN:
+ logger.debug(logger.DBGLVL_TRACE_DETAIL,
+ PYSERVER_COMMON_DNS_TCP_SEND_PENDING,
+ ClientFormatter(self.__remote_addr),
+ self.__send_marker, total_len)
+ return self.SENDING
+ logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_ERROR,
+ ClientFormatter(self.__remote_addr),
+ self.__send_marker, total_len, ex)
+ self.__sock.close()
+ self.__sock = None
+ return self.CLOSED
+ self.__send_marker += cc
+
+ def close(self):
+ '''Close the socket.
+
+ This method closes the underlying socket. Once called, the context
+ object is effectively useless; any further method call would result
+ in a DNSTCPContextError exception.
+
+ The underlying socket will be automatically (and implicitly) closed
+        when this object is deallocated, but Python seems to expect that
+        socket objects be explicitly closed before deallocation. So it's
+        generally advisable for the user of this object to call this method
+        explicitly when it no longer needs the context.
+
+ This method can be called more than once or can be called after
+ other I/O related methods have returned CLOSED; it's compatible
+ with the close() method of the Python socket class.
+
+ '''
+ if self.__sock is None:
+ return
+ self.__sock.close()
+        self.__sock = None # prevent further operation
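
A sketch of the non-blocking send loop this class is designed for, assuming
conn is an already-connected TCP socket and wire is a rendered DNS message;
the select()-based wait is only illustrative:

    # Illustrative caller of DNSTCPContext; 'conn' and 'wire' are assumed
    # to be provided by the application.
    import select
    from isc.server_common.dns_tcp import DNSTCPContext

    def send_dns_message(conn, wire):
        ctx = DNSTCPContext(conn)
        result = ctx.send(wire)
        while result == DNSTCPContext.SENDING:
            # Wait until the socket becomes writable, then resume the send.
            select.select([], [conn], [])
            result = ctx.send_ready()
        if result == DNSTCPContext.CLOSED:
            return False                   # ctx already closed the socket
        ctx.close()
        return True
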
diff --git a/src/lib/python/isc/server_common/logger.py b/src/lib/python/isc/server_common/logger.py
new file mode 100644
index 0000000..7451e05
--- /dev/null
+++ b/src/lib/python/isc/server_common/logger.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Common definitions regarding logging for the server_common package.'''
+
+import isc.log
+
+logger = isc.log.Logger("server_common")
diff --git a/src/lib/python/isc/server_common/server_common_messages.mes b/src/lib/python/isc/server_common/server_common_messages.mes
new file mode 100644
index 0000000..9eab129
--- /dev/null
+++ b/src/lib/python/isc/server_common/server_common_messages.mes
@@ -0,0 +1,65 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the server_common_messages python module.
+
+# since these messages are for the python server_common library, care must
+# be taken that names do not conflict with the messages from the c++
+# server_common library. A checker script should verify that, but we do not
+# have that at this moment. So when adding a message, make sure that
+# the name is not already used in src/lib/config/config_messages.mes
+
+% PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR Invalid name when parsing Auth configuration: %1
+There was an invalid name when parsing Auth configuration.
+
+% PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR Invalid RRClass when parsing Auth configuration: %1
+There was an invalid RR class when parsing Auth configuration.
+
+% PYSERVER_COMMON_DNS_TCP_SEND_DONE completed sending TCP message to %1 (%2 bytes in total)
+Debug message. A complete DNS message has been successfully
+transmitted over a TCP connection, possibly after multiple send
+operations. The destination address and the total size of the message
+(including the 2-byte length field) are shown in the log message.
+
+% PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4
+An attempt to send a DNS message over a TCP connection failed due to
+a network error. Although this isn't expected to happen often, it can
+still happen for various reasons. The
+administrator may want to examine the cause of the failure, which is
+included in the log message, to see if it requires some action to
+be taken at the server side. When this message is logged, the
+corresponding TCP connection was closed immediately after the error
+was detected.
+
+% PYSERVER_COMMON_DNS_TCP_SEND_PENDING sent part TCP message to %1 (up to %2/%3 bytes)
+Debug message. A part of a DNS message has been transmitted over a TCP
+connection, and the operation is suspended because a further attempt
+would block. The destination address, the number of bytes transmitted
+so far, and the total size of the message (including the 2-byte length
+field) are shown in the log message.
+
+% PYSERVER_COMMON_TSIG_KEYRING_DEINIT Deinitializing global TSIG keyring
+A debug message noting that the global TSIG keyring is being removed from
+memory. Most programs don't do that, they just exit, which is OK.
+
+% PYSERVER_COMMON_TSIG_KEYRING_INIT Initializing global TSIG keyring
+A debug message noting the TSIG keyring storage is being prepared. It should
+appear at most once in the lifetime of a program. The keyring still needs
+to be loaded from configuration.
+
+% PYSERVER_COMMON_TSIG_KEYRING_UPDATE Updating global TSIG keyring
+A debug message. The TSIG keyring is being (re)loaded from configuration.
+This happens at startup or when the configuration changes. The old keyring
+is removed and new one created with all the keys.
diff --git a/src/lib/python/isc/server_common/tests/Makefile.am b/src/lib/python/isc/server_common/tests/Makefile.am
new file mode 100644
index 0000000..82cd854
--- /dev/null
+++ b/src/lib/python/isc/server_common/tests/Makefile.am
@@ -0,0 +1,25 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = tsig_keyring_test.py dns_tcp_test.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
diff --git a/src/lib/python/isc/server_common/tests/dns_tcp_test.py b/src/lib/python/isc/server_common/tests/dns_tcp_test.py
new file mode 100644
index 0000000..7e74c04
--- /dev/null
+++ b/src/lib/python/isc/server_common/tests/dns_tcp_test.py
@@ -0,0 +1,246 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Tests for isc.server_common.dns_tcp'''
+
+import isc.log
+from isc.server_common.dns_tcp import *
+import socket
+import errno
+import unittest
+
+def check_length_field(assert_eq, len_data, expected_len):
+    # Examine the "length field" part of the data. It should be a 2-byte
+    # field and, in our implementation, always given as a separate chunk
+    # of data.
+ # The 16-bit length value of the actual data should be stored in the
+ # network byte order.
+ len_high = (expected_len >> 8) & 0x00ff
+ len_low = (expected_len & 0x00ff)
+ assert_eq(2, len(len_data))
+ assert_eq(len_high, len_data[0])
+ assert_eq(len_low, len_data[1])
+
+class BufferTest(unittest.TestCase):
+ def check_length_field(self, buf, expected_len):
+ '''Common subtest for the main tests that checks the length buffer.'''
+ check_length_field(self.assertEqual, buf.get_data(0), expected_len)
+
+        # Confirm that get_data(1) returns the latter half of the (partial)
+ # buffer.
+ self.assertEqual(1, len(buf.get_data(1)))
+ self.assertEqual(expected_len & 0x00ff, buf.get_data(1)[0])
+
+ def test_small_data(self):
+ # The smallest size (in practice) of data: that of the header field.
+ expected_data = b'x' * 12
+ buf = DNSTCPSendBuffer(expected_data)
+ self.check_length_field(buf, 12)
+
+ self.assertEqual(expected_data, buf.get_data(2))
+ self.assertEqual(b'x' * 11, buf.get_data(3))
+ self.assertEqual(None, buf.get_data(14))
+
+ def test_large_data(self):
+ # Test with an arbitrarily large size of data.
+ buf = DNSTCPSendBuffer(b'x' * 65534)
+ self.check_length_field(buf, 65534)
+ self.assertEqual(b'x' * 65534, buf.get_data(2))
+ self.assertEqual(b'x' * 2, buf.get_data(65534))
+ self.assertEqual(None, buf.get_data(65536))
+
+ def test_largest_data(self):
+ # This is the largest possible size of DNS message.
+ buf = DNSTCPSendBuffer(b'y' * 65535)
+ self.check_length_field(buf, 65535)
+ self.assertEqual(b'y', buf.get_data(65536))
+ self.assertEqual(None, buf.get_data(65537))
+
+ def test_too_large_data(self):
+ # The maximum possible size of a valid DNS message is 65535.
+ # Beyond that, the buffer construction should result in an exception.
+ self.assertRaises(ValueError, DNSTCPSendBuffer, b'0' * 65536)
+
+ def test_empty_data(self):
+ # Unusual, but it's not rejected
+ buf = DNSTCPSendBuffer(b'')
+ self.check_length_field(buf, 0)
+ self.assertEqual(None, buf.get_data(2))
+
+ def test_get_total_len(self):
+ self.assertEqual(14, DNSTCPSendBuffer(b'x' * 12).get_total_len())
+ self.assertEqual(2, DNSTCPSendBuffer(b'').get_total_len())
+ self.assertEqual(65537, DNSTCPSendBuffer(b'X' * 65535).get_total_len())
+
+class FakeSocket:
+ '''Emulating python socket w/o involving IO while allowing inspection.'''
+ def __init__(self, proto=socket.IPPROTO_TCP):
+ self._setblocking_val = None # record the latest value of setblocking()
+ self._closed = False # set to True on close()
+ self._sent_data = [] # record the transmitted data in send()
+ self._send_buflen = None # capacity of the faked "send buffer";
+ # None means infinity, -1 means "closed"
+ self._send_cc = 0 # waterline of the send buffer
+ self.proto = proto # protocol (should be TCP, but can be faked)
+
+ def setblocking(self, on):
+ self._setblocking_val = on
+
+ def close(self):
+ self._closed = True
+
+ def send(self, data):
+ # Calculate the available space in the "send buffer"
+ if self._send_buflen == -1:
+ raise socket.error(errno.EPIPE, "Broken pipe")
+ elif self._send_buflen is None:
+ available_space = len(data)
+ else:
+ available_space = self._send_buflen - self._send_cc
+ if available_space == 0:
+ # if there's no space, (assuming it's nonblocking mode) raise
+ # EAGAIN.
+ raise socket.error(errno.EAGAIN,
+ "Resource temporarily unavailable")
+ # determine the sendable part of the data, record it, update "buffer".
+ cc = min(available_space, len(data))
+ self._sent_data.append(data[:cc])
+ self._send_cc += cc
+ return cc
+
+ def make_send_ready(self):
+ # pretend that the accrued data has been cleared, making room in
+ # the send buffer.
+ self._send_cc = 0
+
+ def getpeername(self):
+ '''Return faked remote address'''
+ return ('2001:db8::1', 53000, 0, 0)
+
+class ContextTest(unittest.TestCase):
+ def setUp(self):
+ self.__sock = FakeSocket()
+ # there should be no setblocking value on the fake socket by default.
+ self.assertEqual(None, self.__sock._setblocking_val)
+ self.__ctx = DNSTCPContext(self.__sock)
+ # dummy data that has the same length as the DNS header section:
+ self.__test_data = b'x' * 12
+
+ def test_initialization(self):
+ # Creating a context (in setUp) should make the socket non-blocking.
+ self.assertFalse(self.__sock._setblocking_val)
+
+ # Only a TCP socket is accepted.
+ self.assertRaises(DNSTCPContextError, DNSTCPContext,
+ FakeSocket(proto=socket.IPPROTO_UDP))
+
+ def test_send_all(self):
+ # By default, a single send() call will send out all data by 2
+ # send() calls: one for the 2-byte length data and the other for the
+ # actual data.
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.__ctx.send(self.__test_data))
+ self.assertEqual(2, len(self.__sock._sent_data))
+ check_length_field(self.assertEqual, self.__sock._sent_data[0],
+ len(self.__test_data))
+ self.assertEqual(self.__test_data, self.__sock._sent_data[1])
+
+ def test_send_divided(self):
+ # set the "send buffer" of the fake socket to 7 (half of the size of
+ # len + data).
+ self.__sock._send_buflen = 7
+
+ # The initial send() can only send out the half of the data in
+ # two calls to socket.send(): the first one for the length field,
+ # and the other is for the first 5 bytes of the data
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__ctx.send(self.__test_data))
+ self.assertEqual(2, len(self.__sock._sent_data))
+ check_length_field(self.assertEqual, self.__sock._sent_data[0],
+ len(self.__test_data))
+ self.assertEqual(self.__test_data[:5], self.__sock._sent_data[1])
+
+ # "flush" the send buffer of the fake socket
+ self.__sock.make_send_ready()
+
+ # send_ready() can now complete the send. The remaining data should
+ # have been passed.
+ self.assertEqual(DNSTCPContext.SEND_DONE, self.__ctx.send_ready())
+ self.assertEqual(3, len(self.__sock._sent_data))
+ self.assertEqual(self.__test_data[5:], self.__sock._sent_data[2])
+
+ def test_send_multi(self):
+ # On a successful completion of send, another send can be done.
+ for i in (0, 2):
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.__ctx.send(self.__test_data))
+ self.assertEqual(i + 2, len(self.__sock._sent_data))
+ check_length_field(self.assertEqual, self.__sock._sent_data[i],
+ len(self.__test_data))
+ self.assertEqual(self.__test_data, self.__sock._sent_data[i + 1])
+
+ def test_send_reset(self):
+ # the connection will be "reset" before the initial send.
+ # send() should return CLOSED, and the underlying socket should be
+ # closed.
+ self.__sock._send_buflen = -1
+ self.assertEqual(DNSTCPContext.CLOSED,
+ self.__ctx.send(self.__test_data))
+ self.assertTrue(self.__sock._closed)
+
+ # Once closed, send() cannot be called any more
+ self.assertRaises(DNSTCPContextError, self.__ctx.send,
+ self.__test_data)
+ # Calling close() is okay (it's NO-OP)
+ self.__ctx.close()
+
+ def test_send_divided_reset(self):
+ # Similar to send_reset, but send() succeeds, and then the connection
+ # will be "reset".
+ self.__sock._send_buflen = 7
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__ctx.send(self.__test_data))
+ self.__sock._send_buflen = -1
+ self.assertEqual(DNSTCPContext.CLOSED, self.__ctx.send_ready())
+ self.assertTrue(self.__sock._closed)
+
+ # Once closed, send_ready() cannot be called any more
+ self.assertRaises(DNSTCPContextError, self.__ctx.send_ready)
+
+ def test_duplicate_send(self):
+ # send() cannot be called until it's completed
+ self.__sock._send_buflen = 7
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__ctx.send(self.__test_data))
+ self.assertRaises(DNSTCPContextError, self.__ctx.send,
+ self.__test_data)
+
+ def test_skip_send(self):
+ # send_ready() cannot be called before send().
+ self.assertRaises(DNSTCPContextError, self.__ctx.send_ready)
+
+ def test_close(self):
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.__ctx.send(self.__test_data))
+ self.__ctx.close()
+ self.assertTrue(self.__sock._closed)
+
+ # Duplicate close is just ignored, and the socket is still closed.
+ self.__ctx.close()
+ self.assertTrue(self.__sock._closed)
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/server_common/tests/tsig_keyring_test.py b/src/lib/python/isc/server_common/tests/tsig_keyring_test.py
new file mode 100644
index 0000000..e9a2174
--- /dev/null
+++ b/src/lib/python/isc/server_common/tests/tsig_keyring_test.py
@@ -0,0 +1,193 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Tests for isc.server_common.tsig_keyring.
+"""
+
+import unittest
+import isc.log
+from isc.server_common.tsig_keyring import *
+import isc.dns
+from isc.testutils.ccsession_mock import MockModuleCCSession
+
+class Session(MockModuleCCSession):
+ """
+ A class pretending to be the config session.
+ """
+ def __init__(self):
+ MockModuleCCSession.__init__(self)
+ self._name = None
+ self._callback = None
+ self._remove_name = None
+ self._data = None
+
+ def add_remote_config_by_name(self, name, callback):
+ self._name = name
+ self._callback = callback
+
+ def remove_remote_config(self, name):
+ self._remove_name = name
+
+ def get_remote_config_value(self, module, name):
+ if module != 'tsig_keys' or name != 'keys':
+ raise Exception("Asked for bad data element")
+ return (self._data, False)
+
+class TSIGKeyRingTest(unittest.TestCase):
+ """
+ Tests for the isc.server_common.tsig_keyring module.
+ """
+ def setUp(self):
+ self.__session = Session()
+ self.__sha1name = isc.dns.Name('hmac-sha1')
+ self.__md5name = isc.dns.Name('hmac-md5.sig-alg.reg.int')
+
+ def tearDown(self):
+ deinit_keyring()
+
+ def __do_init(self):
+ init_keyring(self.__session)
+ # Some initialization happened
+ self.assertEqual('tsig_keys', self.__session._name)
+
+ def test_initialization(self):
+ """
+        Test we can initialize and deinitialize the keyring. It also
+        tests the interaction with the get_keyring() function.
+ """
+ # The keyring function raises until initialized
+ self.assertRaises(Unexpected, get_keyring)
+ self.__do_init()
+ current_keyring = get_keyring()
+ self.assertTrue(isinstance(current_keyring, isc.dns.TSIGKeyRing))
+ # Another initialization does nothing
+ self.__do_init()
+ self.assertEqual(current_keyring, get_keyring())
+ # When we deinitialize it, it no longer provides the keyring
+ deinit_keyring()
+ self.assertEqual('tsig_keys', self.__session._remove_name)
+ self.__session._remove_name = None
+ self.assertRaises(Unexpected, get_keyring)
+ # Another deinitialization doesn't change anything
+ deinit_keyring()
+ self.assertRaises(Unexpected, get_keyring)
+ self.assertIsNone(self.__session._remove_name)
+ # Test we can init it again (not expected, but not forbidden)
+ self.__do_init()
+ self.assertTrue(isinstance(get_keyring(), isc.dns.TSIGKeyRing))
+
+ def test_load(self):
+ """
+ Test it can load the keys from the configuration and reload them
+ when the data change.
+ """
+ # Initial load
+ self.__session._data = ['key:MTIzNAo=:hmac-sha1']
+ self.__do_init()
+ keys = get_keyring()
+ self.assertEqual(1, keys.size())
+ (rcode, key) = keys.find(isc.dns.Name('key'), self.__sha1name)
+ self.assertEqual(isc.dns.TSIGKeyRing.SUCCESS, rcode)
+ self.assertEqual(isc.dns.Name('key'), key.get_key_name())
+ # There's a change in the configuration
+ # (The key has a different name)
+ self.__session._data = ['key.example:MTIzNAo=:hmac-sha1']
+ self.__session._callback()
+ orig_keys = keys
+ keys = get_keyring()
+ self.assertNotEqual(keys, orig_keys)
+ self.assertEqual(1, keys.size())
+ # The old key is not here
+ (rcode, key) = keys.find(isc.dns.Name('key'), self.__sha1name)
+ self.assertEqual(isc.dns.TSIGKeyRing.NOTFOUND, rcode)
+ self.assertIsNone(key)
+ # But the new one is
+ (rcode, key) = keys.find(isc.dns.Name('key.example'), self.__sha1name)
+ self.assertEqual(isc.dns.TSIGKeyRing.SUCCESS, rcode)
+ self.assertEqual(isc.dns.Name('key.example'), key.get_key_name())
+
+ def test_empty_update(self):
+ """
+        Test that an update that doesn't carry the expected element doesn't
+        change anything.
+ """
+ self.__session._data = ['key:MTIzNAo=:hmac-sha1']
+ self.__do_init()
+ keys = get_keyring()
+ self.__session._data = None
+ self.__session._callback()
+ self.assertEqual(keys, get_keyring())
+
+ def test_no_keys_update(self):
+ """
+ Test we can update the keyring to be empty.
+ """
+ self.__session._data = ['key:MTIzNAo=:hmac-sha1']
+ self.__do_init()
+ keys = get_keyring()
+ self.assertEqual(1, keys.size())
+ self.__session._data = []
+ self.__session._callback()
+ keys = get_keyring()
+ self.assertEqual(0, keys.size())
+
+ def test_update_multi(self):
+ """
+ Test we can handle multiple keys in startup/update.
+ """
+ # Init
+ self.__session._data = ['key:MTIzNAo=:hmac-sha1', 'key2:MTIzNAo=']
+ self.__do_init()
+ keys = get_keyring()
+ self.assertEqual(2, keys.size())
+ (rcode, key) = keys.find(isc.dns.Name('key'), self.__sha1name)
+ self.assertEqual(isc.dns.TSIGKeyRing.SUCCESS, rcode)
+ self.assertEqual(isc.dns.Name('key'), key.get_key_name())
+ (rcode, key) = keys.find(isc.dns.Name('key2'), self.__md5name)
+ self.assertEqual(isc.dns.TSIGKeyRing.SUCCESS, rcode)
+ self.assertEqual(isc.dns.Name('key2'), key.get_key_name())
+ # Update
+ self.__session._data = ['key1:MTIzNAo=:hmac-sha1', 'key3:MTIzNAo=']
+ self.__session._callback()
+ keys = get_keyring()
+ self.assertEqual(2, keys.size())
+ (rcode, key) = keys.find(isc.dns.Name('key1'), self.__sha1name)
+ self.assertEqual(isc.dns.TSIGKeyRing.SUCCESS, rcode)
+ self.assertEqual(isc.dns.Name('key1'), key.get_key_name())
+ (rcode, key) = keys.find(isc.dns.Name('key3'), self.__md5name)
+ self.assertEqual(isc.dns.TSIGKeyRing.SUCCESS, rcode)
+ self.assertEqual(isc.dns.Name('key3'), key.get_key_name())
+
+ def test_update_bad(self):
+ """
+ Test it raises on bad updates and doesn't change anything.
+ """
+ self.__session._data = ['key:MTIzNAo=:hmac-sha1']
+ self.__do_init()
+ keys = get_keyring()
+ # Bad TSIG string
+ self.__session._data = ['key:this makes no sense:really']
+ self.assertRaises(isc.dns.InvalidParameter, self.__session._callback)
+ self.assertEqual(keys, get_keyring())
+        # A duplicate key
+ self.__session._data = ['key:MTIzNAo=:hmac-sha1', 'key:MTIzNAo=:hmac-sha1']
+ self.assertRaises(AddError, self.__session._callback)
+ self.assertEqual(keys, get_keyring())
+
+if __name__ == "__main__":
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/server_common/tsig_keyring.py b/src/lib/python/isc/server_common/tsig_keyring.py
new file mode 100644
index 0000000..de3b759
--- /dev/null
+++ b/src/lib/python/isc/server_common/tsig_keyring.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module conveniently keeps a copy of the TSIG keyring loaded from the
+tsig_keys module.
+"""
+
+import isc.dns
+import isc.log
+from isc.server_common.logger import logger
+from isc.log_messages.server_common_messages import *
+
+updater = None
+
+class Unexpected(Exception):
+ """
+    Raised when an unexpected operation is requested by the user of this
+    module, for example calling get_keyring() before init_keyring().
+ """
+ pass
+
+class AddError(Exception):
+ """
+    Raised when a key cannot be added. This usually means there's a
+ duplicate.
+ """
+ pass
+
+class Updater:
+ """
+    The updater of the TSIG key ring. Not to be used directly.
+ """
+ def __init__(self, session):
+ """
+ Constructor. Pass the ccsession object so the key ring can be
+ downloaded.
+ """
+ logger.debug(logger.DBGLVL_TRACE_BASIC,
+ PYSERVER_COMMON_TSIG_KEYRING_INIT)
+ self.__session = session
+ self.__keyring = isc.dns.TSIGKeyRing()
+ session.add_remote_config_by_name('tsig_keys', self.__update)
+ self.__update()
+
+ def __update(self, value=None, module_cfg=None):
+ """
+ Update the key ring by the configuration.
+
+        Note that this function is used as a callback, but can raise
+        on bad data. Bad data is expected to be caught by the
+        configuration plugin and should never reach this point.
+
+ The parameters are there just to match the signature which
+ the callback should have (i.e. they are ignored).
+ """
+ logger.debug(logger.DBGLVL_TRACE_BASIC,
+ PYSERVER_COMMON_TSIG_KEYRING_UPDATE)
+ (data, _) = self.__session.get_remote_config_value('tsig_keys', 'keys')
+ if data is not None: # There's an update
+ keyring = isc.dns.TSIGKeyRing()
+ for key_data in data:
+ key = isc.dns.TSIGKey(key_data)
+ if keyring.add(key) != isc.dns.TSIGKeyRing.SUCCESS:
+ raise AddError("Can't add key " + str(key))
+ self.__keyring = keyring
+
+ def get_keyring(self):
+ """
+ Return the current key ring.
+ """
+ return self.__keyring
+
+ def deinit(self):
+ """
+ Unregister from getting updates. The object will not be
+ usable any more after this.
+ """
+ logger.debug(logger.DBGLVL_TRACE_BASIC,
+ PYSERVER_COMMON_TSIG_KEYRING_DEINIT)
+ self.__session.remove_remote_config('tsig_keys')
+
+def get_keyring():
+ """
+    Get the current key ring. You need to call init_keyring() first.
+ """
+ if updater is None:
+ raise Unexpected("You need to initialize the keyring first by " +
+ "init_keyring()")
+ return updater.get_keyring()
+
+def init_keyring(session):
+ """
+ Initialize the key ring for future use. It does nothing if already
+ initialized.
+ """
+ global updater
+ if updater is None:
+ updater = Updater(session)
+
+def deinit_keyring():
+ """
+    Deinitialize the key ring. You can no longer call get_keyring() after this.
+ Does nothing if not initialized.
+ """
+ global updater
+ if updater is not None:
+ updater.deinit()
+ updater = None
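
A brief usage sketch of this module (illustrative only, not part of the
patch). The 'session' argument stands for the component's ccsession-like
object, i.e. anything supporting add_remote_config_by_name(),
get_remote_config_value() and remove_remote_config() as used above.

    import isc.server_common.tsig_keyring as tsig_keyring

    def start(session):
        # Registers for tsig_keys updates and fetches the initial keyring.
        tsig_keyring.init_keyring(session)

    def find_key(name, algorithm):
        # Always returns the current copy; updates are applied automatically
        # by the configuration callback.
        return tsig_keyring.get_keyring().find(name, algorithm)

    def stop():
        # After this, get_keyring() raises Unexpected again.
        tsig_keyring.deinit_keyring()
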
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 0b08257..7abc1bc 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1,4 +1,5 @@
-EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+EXTRA_DIST = __init__.py ccsession_mock.py parse_args.py tsigctx_mock.py \
+ rrset_utils.py
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/testutils/ccsession_mock.py b/src/lib/python/isc/testutils/ccsession_mock.py
new file mode 100644
index 0000000..5f88678
--- /dev/null
+++ b/src/lib/python/isc/testutils/ccsession_mock.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+class MockModuleCCSession():
+ """Fake ModuleCCSession with a minimal implementation as needed by the
+       tests. Currently this class only records whether certain methods have
+       been called on it (send_stopping() and close())."""
+ def __init__(self):
+ """Will be set to True when send_stopping() is called"""
+ self.stopped = False
+ """Will be set to True when close() is called"""
+ self.closed = False
+
+ def send_stopping(self):
+ """Fake send_stopping() call. No message is sent, but only stores
+ that this method has been called."""
+ self.stopped = True
+
+ def close(self):
+ """Fake close() call. Nothing is closed, but only stores
+ that this method has been called."""
+ self.closed = True
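
As an illustration (assumed test code, not part of the patch), a test using
this mock would typically drive the code under test and then inspect the two
flags:

    import unittest
    from isc.testutils.ccsession_mock import MockModuleCCSession

    class ShutdownTest(unittest.TestCase):
        def test_shutdown_notifies_and_closes(self):
            cc = MockModuleCCSession()
            # Stand-in for the component's shutdown path, which is expected
            # to call send_stopping() and then close() on its ccsession.
            cc.send_stopping()
            cc.close()
            self.assertTrue(cc.stopped)
            self.assertTrue(cc.closed)
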
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
new file mode 100644
index 0000000..7eac772
--- /dev/null
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Utility functions handling DNS RRsets commonly used for tests'''
+
+from isc.dns import *
+
+def rrsets_equal(a, b):
+ '''Compare two RRsets, return True if equal, otherwise False
+
+    We provide this function as part of the test utilities because there is
+    no direct RRset comparison at the moment. There's no accessor for
+    signatures either (so this only checks name, class, type, ttl, and rdata).
+ Also, since we often use fake data in RRSIGs, RRSIG RDATA are not checked.
+
+ '''
+ return a.get_name() == b.get_name() and \
+ a.get_class() == b.get_class() and \
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and \
+ (a.get_type() == RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# The following are short cut utilities to create an RRset of a specific
+# RR type with one RDATA. Many of the RR parameters are common in most
+# tests, so we define default values for them for convenience.
+
+def create_a(name, address, ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ return rrset
+
+def create_aaaa(name, address, ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+ return rrset
+
+def create_ns(nsname, name=Name('example.com'), ttl=3600):
+ '''For convenience we use a default name often used as a zone name'''
+ rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+ return rrset
+
+def create_cname(target='target.example.com', name=Name('example.com'),
+ ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+ return rrset
+
+def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
+ '''Create an RR of a general type with an arbitrary length of RDATA
+
+    If the RR type isn't specified, type 65300 will be used, which is
+    arbitrarily chosen from the IANA "Reserved for Private Use" range.
+    The RDATA will be filled with the specified length of all-zero data.
+
+ '''
+ rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
+ rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+ str(rdlen) + ' ' + '00' * rdlen))
+ return rrset
+
+def create_soa(serial, name=Name('example.com'), ttl=3600):
+ '''For convenience we use a default name often used as a zone name'''
+
+ rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+ rdata_str = 'master.example.com. admin.example.com. ' + \
+ str(serial) + ' 3600 1800 2419200 7200'
+ rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+ return rrset
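
A quick, illustrative example of these helpers (assuming the isc.dns bindings
are importable; the names and addresses are placeholders):

    from isc.dns import Name
    from isc.testutils.rrset_utils import create_a, create_soa, rrsets_equal

    a1 = create_a(Name('www.example.com'), '192.0.2.1')
    a2 = create_a(Name('www.example.com'), '192.0.2.1', ttl=3600)
    assert rrsets_equal(a1, a2)          # same name/class/type/TTL/RDATA

    soa = create_soa(2012063001)         # only the serial varies; the rest
                                         # of the SOA fields use the defaults
    assert soa.get_name() == Name('example.com')
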
diff --git a/src/lib/python/isc/util/Makefile.am b/src/lib/python/isc/util/Makefile.am
index 140e221..3eaaa12 100644
--- a/src/lib/python/isc/util/Makefile.am
+++ b/src/lib/python/isc/util/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = . tests
+SUBDIRS = . cio tests
python_PYTHON = __init__.py process.py socketserver_mixin.py file.py
diff --git a/src/lib/python/isc/util/cio/Makefile.am b/src/lib/python/isc/util/cio/Makefile.am
new file mode 100644
index 0000000..0a2e735
--- /dev/null
+++ b/src/lib/python/isc/util/cio/Makefile.am
@@ -0,0 +1,41 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+python_PYTHON = __init__.py
+pythondir = $(PYTHON_SITEPKG_DIR)/isc/util/cio
+
+pyexec_LTLIBRARIES = socketsession.la
+pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/util/cio
+
+socketsession_la_SOURCES = socketsession_python.cc socketsession_python.h
+socketsession_la_SOURCES += socketsessionforwarder_python.cc
+socketsession_la_SOURCES += socketsessionforwarder_python.h
+socketsession_la_SOURCES += socketsessionreceiver_python.cc
+socketsession_la_SOURCES += socketsessionreceiver_python.h
+socketsession_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+socketsession_la_LDFLAGS = $(PYTHON_LDFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+socketsession_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+
+# Python prefers .so, while some OSes (specifically MacOS) use a different
+# suffix for dynamic objects. -module is necessary to work around this.
+socketsession_la_LDFLAGS += -module -avoid-version
+socketsession_la_LIBADD = $(top_builddir)/src/lib/util/io/libutil_io.la
+socketsession_la_LIBADD += $(PYTHON_LIB)
+
+# This is not installed, it helps locate the module during tests
+EXTRA_DIST = __init__.py socketsession.py
+
+EXTRA_DIST += socketsession_inc.cc
+EXTRA_DIST += socketsessionforwarder_inc.cc socketsessionreceiver_inc.cc
+
+CLEANFILES = __init__.pyc socketsession.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/util/cio/__init__.py b/src/lib/python/isc/util/cio/__init__.py
new file mode 100644
index 0000000..935160a
--- /dev/null
+++ b/src/lib/python/isc/util/cio/__init__.py
@@ -0,0 +1,3 @@
+"""
+Here are functions and classes for forwarding socket sessions between processes.
+"""
diff --git a/src/lib/python/isc/util/cio/socketsession.py b/src/lib/python/isc/util/cio/socketsession.py
new file mode 100644
index 0000000..5017d90
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsession.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed. See python/isc/log/__init__.py for the trick.
+
+import os
+import sys
+
+for base in sys.path[:]:
+ libdir = os.path.join(base, 'isc/util/cio/.libs')
+ if os.path.exists(libdir):
+ sys.path.insert(0, libdir)
+
+from socketsession import *
diff --git a/src/lib/python/isc/util/cio/socketsession_inc.cc b/src/lib/python/isc/util/cio/socketsession_inc.cc
new file mode 100644
index 0000000..e200063
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsession_inc.cc
@@ -0,0 +1,122 @@
+namespace {
+const char* const socketsession_doc = "\
+This module defines a set of classes that support forwarding a\n\
+\"socket session\" from one process to another. A socket session is a\n\
+conceptual tuple of the following elements:\n\
+\n\
+- A network socket\n\
+- The local and remote endpoints of an (IP) communication taking place\n\
+ on the socket. In practice an endpoint is a pair of an IP address\n\
+  and a TCP or UDP port number.\n\
+- Some amount of data sent from the remote endpoint and received on\n\
+ the socket. We call it (socket) session data in this documentation.\n\
+\n\
+Note that this is a conceptual definition. Depending on the underlying\n\
+implementation and/or the network protocol, some of the elements could\n\
+be part of others; for example, if it's an established TCP connection,\n\
+the local and remote endpoints would be able to be retrieved from the\n\
+socket using the standard getsockname() and getpeername() system\n\
+calls. But in this definition we separate these to be more generic.\n\
+Also, as a matter of fact our intended usage includes non-connected\n\
+UDP communications, in which case at least the remote endpoint should\n\
+be provided separately from the socket.\n\
+\n\
+In the actual implementation we represent a socket as a Python socket\n\
+object, which contains the information of the address family\n\
+(e.g. AF_INET6), socket type (e.g. SOCK_STREAM), and protocol\n\
+(e.g. IPPROTO_TCP).\n\
+\n\
+We use the Python socket address tuple to represent endpoints.\n\
+\n\
+Socket session data is an opaque blob in the form of a Python byte\n\
+object.\n\
+\n\
+To forward a socket session between processes, we use connected UNIX\n\
+domain sockets established between the processes. The file descriptor\n\
+will be forwarded through the sockets as an ancillary data item of\n\
+type SCM_RIGHTS. Other elements of the session will be transferred as\n\
+normal data over the connection.\n\
+\n\
+We provide two classes to help applications forward socket sessions:\n\
+SocketSessionForwarder is the sending side of the UNIX domain connection,\n\
+while SocketSessionReceiver is the receiving side (this interface assumes\n\
+one direction of forwarding).\n\
+\n\
+Note: this paragraph and the following discussion of the internal\n\
+protocol are for reference purposes only; understanding them is not\n\
+necessary in order to use the API.\n\
+SocketSessionForwarder and SocketSessionReceiver objects (internally)\n\
+use a straightforward protocol to pass elements of socket sessions.\n\
+Once the connection is established, the forwarder object first forwards\n\
+the file descriptor with 1-byte dummy data. It then forwards a\n\
+\"(socket) session header\", which contains all other elements of\n\
+the session except the file descriptor (already forwarded) and session\n\
+data. The wire format of the header is as follows:\n\
+\n\
+- The length of the header (16-bit unsigned integer)\n\
+- Address family\n\
+- Socket type\n\
+- Protocol\n\
+- Size of the local endpoint in bytes\n\
+- Local endpoint (a copy of the memory image of the corresponding\n\
+ sockaddr)\n\
+- Size of the remote endpoint in bytes\n\
+- Remote endpoint (same as local endpoint)\n\
+- Size of session data in bytes\n\
+\n\
+The type of the fields is 32-bit unsigned integer unless explicitly\n\
+noted, and all fields are formatted in the network byte order.\n\
+\n\
+The socket session data immediately follows the session header.\n\
+\n\
+Note that the fields do not necessarily have to be in the network byte\n\
+order because they are expected to be exchanged on the same machine.\n\
+Likewise, integer elements such as the address family do not necessarily\n\
+have to be represented as a fixed-size value (i.e., 32-bit). But fixed size\n\
+fields are used in order to ensure maximum portability in such a\n\
+(rare) case where the forwarder and the receiver are built with\n\
+different compilers that have different definitions of int. Also,\n\
+since sockaddr fields are generally formatted in the network byte\n\
+order, other fields are defined so to be consistent.\n\
+\n\
+One basic assumption in the API of this module is that socket sessions\n\
+should be forwarded without blocking, thus eliminating the need for\n\
+incremental read/write or blocking other important services such as\n\
+responding to requests from the application's clients. This assumption\n\
+should hold as long as both the forwarder and receiver have\n\
+sufficient resources to handle the forwarding process since the\n\
+communication is local. But a forward attempt could still block if the\n\
+receiver is busy (or even hangs up) and cannot keep up with the volume\n\
+of incoming sessions.\n\
+\n\
+So, in this implementation, the forwarder uses non-blocking writes to\n\
+forward sessions. If a write attempt could block, it immediately gives\n\
+up the operation with an exception. The corresponding application is\n\
+expected to catch it, close the connection, and perform any necessary\n\
+recovery for that application (that would normally be to re-establish the\n\
+connection with a new receiver, possibly after confirming the\n\
+receiving side is still alive). On the other hand, the receiver\n\
+implementation assumes it's possible that it only receives incomplete\n\
+elements of a session (such as in the case where the forwarder writes\n\
+part of the entire session and gives up the connection). The receiver\n\
+implementation throws an exception when it encounters an incomplete\n\
+session. Like the case of the forwarder application, the receiver\n\
+application is expected to catch it, close the connection, and perform\n\
+any necessary recovery steps.\n\
+\n\
+Note that the receiver implementation uses a blocking read. So it's the\n\
+application's responsibility to ensure that there's at least some data\n\
+in the connection when the receiver object is requested to receive a\n\
+session (unless this operation is allowed to block, e.g., by the use of a\n\
+separate thread). Also, if the forwarder implementation or application\n\
+is malicious or extremely buggy and intentionally sends partial\n\
+session and keeps the connection, the receiver could block in\n\
+receiving a session. In general, we assume the forwarder doesn't do\n\
+intentional blocking as it's a local node and is generally a module of\n\
+the same (BIND 10) system. The minimum requirement for the forwarder\n\
+implementation (and application) is to make sure the connection is\n\
+closed once it detects an error on it. Even a naive implementation\n\
+that simply dies due to the exception will meet this requirement.\n\
+\n\
+";
+} // unnamed namespace
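
To make the header layout described above concrete, here is a small
reference-only sketch in Python using the struct module. It is not part of
the patch or of the API; the real encoding is done inside the C++
isc::util::io::SocketSessionForwarder. The assumption that the leading
16-bit length does not count itself is the sketch author's, not stated in
the documentation above.

    import struct

    def build_session_header(family, socktype, protocol,
                             local_sa, remote_sa, data_len):
        """local_sa/remote_sa are raw sockaddr memory images (bytes)."""
        # 32-bit unsigned fields in network byte order, per the description.
        body = struct.pack('!IIII', family, socktype, protocol, len(local_sa))
        body += local_sa
        body += struct.pack('!I', len(remote_sa))
        body += remote_sa
        body += struct.pack('!I', data_len)
        # 16-bit header length, assumed not to include the length field itself.
        return struct.pack('!H', len(body)) + body
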
diff --git a/src/lib/python/isc/util/cio/socketsession_python.cc b/src/lib/python/isc/util/cio/socketsession_python.cc
new file mode 100644
index 0000000..8fdfbc1
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsession_python.cc
@@ -0,0 +1,79 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include "socketsessionreceiver_python.h"
+#include "socketsessionforwarder_python.h"
+
+using namespace isc::util::io::python;
+using namespace isc::util::python;
+
+#include "socketsession_inc.cc"
+
+namespace isc {
+namespace util {
+namespace io {
+namespace python {
+PyObject* po_SocketSessionError;
+}
+}
+}
+}
+
+namespace {
+
+PyModuleDef socketsession = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "isc.util.cio.socketsession",
+ socketsession_doc,
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+} // end of unnamed namespace
+
+PyMODINIT_FUNC
+PyInit_socketsession(void) {
+ PyObject* mod = PyModule_Create(&socketsession);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ try {
+ po_SocketSessionError =
+ PyErr_NewException("isc.util.cio.SocketSessionError", NULL, NULL);
+ PyObjectContainer(po_SocketSessionError).
+ installToModule(mod, "SocketSessionError");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_SocketSessionForwarder(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+ if (!initModulePart_SocketSessionReceiver(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/util/cio/socketsession_python.h b/src/lib/python/isc/util/cio/socketsession_python.h
new file mode 100644
index 0000000..b0703ac
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsession_python.h
@@ -0,0 +1,35 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SOCKETSESSION_H
+#define __PYTHON_SOCKETSESSION_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace util {
+namespace io {
+namespace python {
+
+extern PyObject* po_SocketSessionError;
+
+} // namespace python
+} // namespace io
+} // namespace util
+} // namespace isc
+#endif // __PYTHON_SOCKETSESSION_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/util/cio/socketsessionforwarder_inc.cc b/src/lib/python/isc/util/cio/socketsessionforwarder_inc.cc
new file mode 100644
index 0000000..6b9de01
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsessionforwarder_inc.cc
@@ -0,0 +1,136 @@
+namespace {
+// Modifications:
+// reference to the module description (instead of "utility")
+// exception description
+const char* const SocketSessionForwarder_doc = "\
+The forwarder of socket sessions.\n\
+\n\
+An object of this class maintains a UNIX domain socket (normally\n\
+expected to be connected to a SocketSessionReceiver object) and\n\
+forwards socket sessions to the receiver.\n\
+\n\
+See the description of socketsession module for other details of how\n\
+the session forwarding works.\n\
+\n\
+SocketSessionForwarder(unix_file)\n\
+\n\
+ The constructor.\n\
+\n\
+ It's constructed with path information of the intended receiver,\n\
+ but does not immediately establish a connection to the receiver;\n\
+ connect_to_receiver() must be called to establish it. These are\n\
+    separated so that an object of this class can be initialized (possibly\n\
+ as an attribute of a higher level application class object)\n\
+    without knowing whether the receiver is ready for accepting new\n\
+ forwarders. The separate connect interface allows the object to be\n\
+ reused when it detects connection failure and tries to re-\n\
+ establish it after closing the failed one.\n\
+\n\
+ On construction, it also installs a signal filter for SIGPIPE to\n\
+ ignore it. Since this class uses a stream-type connected UNIX\n\
+ domain socket, if the receiver (abruptly) closes the connection a\n\
+ subsequent write operation on the socket would trigger a SIGPIPE\n\
+ signal, which kills the caller process by default. This behavior\n\
+ would be undesirable in many cases, so this implementation always\n\
+ disables the signal.\n\
+\n\
+ This approach has some drawbacks, however; first, since signal\n\
+    handling is process (or thread) wide, ignoring it may not be what the\n\
+ application wants. On the other hand, if the application changes\n\
+ how the signal is handled after instantiating this class, the new\n\
+ behavior affects the class operation. Secondly, even if ignoring\n\
+ the signal is the desired operation, it's a waste to set the\n\
+ filter every time this class object is constructed. It's\n\
+ sufficient to do it once. We still adopt this behavior based on\n\
+ the observation that in most cases applications would like to\n\
+    ignore SIGPIPE (or simply don't care about it) and that this\n\
+ class is not instantiated so often (so the wasteful setting\n\
+ overhead should be marginal). On the other hand, doing it every\n\
+ time is beneficial if the application is threaded and different\n\
+ threads create different forwarder objects (and if signals work\n\
+ per thread).\n\
+\n\
+ Exceptions:\n\
+ SocketSessionError unix_file is invalid as a path name of a UNIX\n\
+ domain socket or error happens in setting a filter for\n\
+ SIGPIPE (see above)\n\
+ SystemError Unexpected errors such as resource allocation failure\n\
+\n\
+ Parameters:\n\
+ unix_file Path name of the receiver.\n\
+\n\
+";
+
+// Modifications:
+// exception description
+const char* const SocketSessionForwarder_connectToReceiver_doc = "\
+connect_to_receiver()\n\
+\n\
+Establish a connection to the receiver.\n\
+\n\
+This method establishes a connection to the receiver at the path given\n\
+on construction. It makes the underlying UNIX domain socket\n\
+non-blocking, so this method (or subsequent push() calls) does not block.\n\
+\n\
+Exceptions:\n\
+ TypeError The method is called while an already established\n\
+ connection is still active.\n\
+ SocketSessionError A system error in socket operation.\n\
+ SystemError Unexpected errors such as resource allocation failure\n\
+\n\
+";
+
+// Modifications:
+// bullet description
+// parameters
+// exception description
+const char* const SocketSessionForwarder_push_doc = "\
+push(sock, family, type, protocol, local_end, remote_end, data)\n\
+\n\
+Forward a socket session to the receiver.\n\
+\n\
+This method takes a set of parameters that represent a single socket\n\
+session, renders them in the \"wire\" format according to the internal\n\
+protocol (see socketsession module) and forwards them to the\n\
+receiver through the UNIX domain connection.\n\
+\n\
+The connection must have been established by connect_to_receiver().\n\
+\n\
+For simplicity and for the convenience of detecting application\n\
+errors, this method imposes some restrictions on the parameters:\n\
+\n\
+- Socket family must be either AF_INET or AF_INET6\n\
+- The address family (sa_family) member of the local and remote end\n\
+ points must be equal to the family parameter\n\
+- Socket session data must not be empty\n\
+- Data length must not exceed 65535\n\
+\n\
+These are not architectural limitations, and might be loosened in future\n\
+versions as we see the need for flexibility.\n\
+\n\
+Since the underlying UNIX domain socket is non-blocking (see the\n\
+description for the constructor), a call to this method should either\n\
+return immediately or result in an exception (in case of \"would\n\
+block\").\n\
+\n\
+Exceptions:\n\
+ TypeError The method is called before establishing a connection or\n\
+ given parameters are invalid, or the given socket address\n\
+              is invalid.\n\
+ SocketSessionError A system error in socket operation, including the\n\
+ case where the write operation would block.\n\
+\n\
+Parameters:\n\
+ sock (int) The socket file descriptor\n\
+ family (int) The address family (such as socket.AF_INET6) of the\n\
+ socket\n\
+ type (int) The socket type (such as socket.SOCK_DGRAM) of the\n\
+ socket\n\
+ protocol (int) The transport protocol (such as socket.IPPROTO_UDP)\n\
+ of the socket\n\
+ local_end (socket address) The local end point of the session\n\
+ remote_end (socket address) The remote end point of the session\n\
+    data (byte) The session data\n\
+\n\
+";
+} // unnamed namespace
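
A hedged usage sketch of the forwarder documented above, for the
non-connected UDP case. The import path follows the in-tree module layout
(isc.util.cio.socketsession); the UNIX domain path and the addresses are
placeholders.

    import socket
    from isc.util.cio.socketsession import SocketSessionForwarder

    fwd = SocketSessionForwarder('/tmp/receiver.unix')   # hypothetical path
    fwd.connect_to_receiver()

    sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                         socket.IPPROTO_UDP)
    fwd.push(sock.fileno(), socket.AF_INET6, socket.SOCK_DGRAM,
             socket.IPPROTO_UDP,
             ('2001:db8::1', 53, 0, 0),     # local end point
             ('2001:db8::2', 5300, 0, 0),   # remote end point
             b'session data')               # non-empty, at most 65535 bytes
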
diff --git a/src/lib/python/isc/util/cio/socketsessionforwarder_python.cc b/src/lib/python/isc/util/cio/socketsessionforwarder_python.cc
new file mode 100644
index 0000000..583a877
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsessionforwarder_python.cc
@@ -0,0 +1,309 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <util/io/sockaddr_util.h>
+#include <util/io/socketsession.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "socketsession_python.h"
+#include "socketsessionforwarder_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::util::io;
+using namespace isc::util::io::internal;
+using namespace isc::util::io::python;
+using boost::lexical_cast;
+
+// Trivial constructor.
+s_SocketSessionForwarder::s_SocketSessionForwarder() : cppobj(NULL) {
+}
+
+// Import pydoc text
+#include "socketsessionforwarder_inc.cc"
+
+// See python/isc/log/log.cc for the use of namespace
+namespace clang_unnamed_namespace_workaround {
+// Internal exception class thrown when address parsing fails
+class AddressParseError: public isc::Exception {
+public:
+ AddressParseError(const char *file, size_t line, const char *what):
+ isc::Exception(file, line, what) {}
+};
+}
+using namespace clang_unnamed_namespace_workaround;
+
+namespace {
+
+int
+SocketSessionForwarder_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_SocketSessionForwarder* self =
+ static_cast<s_SocketSessionForwarder*>(po_self);
+ try {
+ const char* unix_file;
+ if (PyArg_ParseTuple(args, "s", &unix_file)) {
+ self->cppobj = new SocketSessionForwarder(unix_file);
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to construct SocketSessionForwarder object: " +
+ string(ex.what());
+ PyErr_SetString(po_SocketSessionError, ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ return (-1);
+ }
+
+ return (-1);
+}
+
+void
+SocketSessionForwarder_destroy(PyObject* po_self) {
+ s_SocketSessionForwarder* self =
+ static_cast<s_SocketSessionForwarder*>(po_self);
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// Convert a Python socket address object to an addrinfo structure by
+// getaddrinfo.
+void
+parsePySocketAddress(PyObject* obj, int type, int protocol,
+ struct sockaddr_storage* ss)
+{
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_socktype = type;
+ hints.ai_protocol = protocol;
+ hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;
+
+ const char* addr;
+ int port, flowinfo, scopeid;
+ struct addrinfo *res;
+ if (PyArg_ParseTuple(obj, "si", &addr, &port)) {
+ // Possibly an IPv4 address.
+ hints.ai_family = AF_INET;
+ const int error = getaddrinfo(addr,
+ lexical_cast<string>(port).c_str(),
+ &hints, &res);
+ if (error == 0) {
+ assert(res->ai_addrlen <= sizeof(*ss));
+ memcpy(ss, res->ai_addr, res->ai_addrlen);
+ return;
+ }
+ isc_throw(AddressParseError, "Invalid or unsupported socket address: "
+ << gai_strerror(error));
+ }
+ PyErr_Clear();
+ if (PyArg_ParseTuple(obj, "siii", &addr, &port, &flowinfo, &scopeid)) {
+ // Possibly an IPv6 address. We ignore flowinfo.
+ hints.ai_family = AF_INET6;
+ const int error = getaddrinfo(addr,
+ lexical_cast<string>(port).c_str(),
+ &hints, &res);
+ if (error == 0) {
+ assert(res->ai_addrlen <= sizeof(*ss));
+ memcpy(ss, res->ai_addr, res->ai_addrlen);
+ void* p = ss;
+ static_cast<struct sockaddr_in6*>(p)->sin6_scope_id = scopeid;
+ return;
+ }
+ isc_throw(AddressParseError, "Invalid or unsupported socket address: "
+ << gai_strerror(error));
+ }
+ PyErr_Clear();
+ isc_throw(AddressParseError, "Invalid or unsupported socket address, must "
+ "be AF_INET or AF_INET6 socket address.");
+}
+
+PyObject*
+SocketSessionForwarder_connectToReceiver(PyObject* po_self, PyObject*) {
+ s_SocketSessionForwarder* const self =
+ static_cast<s_SocketSessionForwarder*>(po_self);
+
+ try {
+ self->cppobj->connectToReceiver();
+ Py_RETURN_NONE;
+ } catch (const isc::BadValue& ex) {
+ PyErr_SetString(PyExc_TypeError, ex.what());
+ return (NULL);
+ } catch (const SocketSessionError& ex) {
+ PyErr_SetString(po_SocketSessionError, ex.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure in connecting to receiver: " +
+ string(ex.what());
+ PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+SocketSessionForwarder_push(PyObject* po_self, PyObject* args) {
+ s_SocketSessionForwarder* const self =
+ static_cast<s_SocketSessionForwarder*>(po_self);
+
+ try {
+ int fd, family, type, protocol;
+ PyObject* po_local_end;
+ PyObject* po_remote_end;
+ Py_buffer py_buf;
+
+ if (!PyArg_ParseTuple(args, "iiiiOOy*", &fd, &family, &type, &protocol,
+ &po_local_end, &po_remote_end, &py_buf)) {
+ return (NULL);
+ }
+ struct sockaddr_storage ss_local, ss_remote;
+ parsePySocketAddress(po_local_end, type, protocol, &ss_local);
+ parsePySocketAddress(po_remote_end, type, protocol, &ss_remote);
+ self->cppobj->push(fd, family, type, protocol,
+ *convertSockAddr(&ss_local),
+ *convertSockAddr(&ss_remote),
+ py_buf.buf, py_buf.len);
+ Py_RETURN_NONE;
+ } catch (const AddressParseError& ex) {
+ PyErr_SetString(PyExc_TypeError, ex.what());
+ return (NULL);
+ } catch (const isc::BadValue& ex) {
+ PyErr_SetString(PyExc_TypeError, ex.what());
+ return (NULL);
+ } catch (const SocketSessionError& ex) {
+ PyErr_SetString(po_SocketSessionError, ex.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ return (NULL);
+ }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef SocketSessionForwarder_methods[] = {
+ { "push", SocketSessionForwarder_push, METH_VARARGS,
+ SocketSessionForwarder_push_doc },
+ { "connect_to_receiver", SocketSessionForwarder_connectToReceiver,
+ METH_NOARGS, SocketSessionForwarder_connectToReceiver_doc },
+ { NULL, NULL, 0, NULL }
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace util {
+namespace io {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_SocketSessionForwarder
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject socketsessionforwarder_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.util.cio.SocketSessionForwarder",
+ sizeof(s_SocketSessionForwarder), // tp_basicsize
+ 0, // tp_itemsize
+ SocketSessionForwarder_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ SocketSessionForwarder_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ SocketSessionForwarder_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ SocketSessionForwarder_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+// Module Initialization, all statics are initialized here
+bool
+initModulePart_SocketSessionForwarder(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&socketsessionforwarder_type) < 0) {
+ return (false);
+ }
+ void* p = &socketsessionforwarder_type;
+ if (PyModule_AddObject(mod, "SocketSessionForwarder",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&socketsessionforwarder_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace io
+} // namespace util
+} // namespace isc
diff --git a/src/lib/python/isc/util/cio/socketsessionforwarder_python.h b/src/lib/python/isc/util/cio/socketsessionforwarder_python.h
new file mode 100644
index 0000000..2ce220a
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsessionforwarder_python.h
@@ -0,0 +1,45 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SOCKETSESSIONFORWARDER_H
+#define __PYTHON_SOCKETSESSIONFORWARDER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace util {
+namespace io {
+class SocketSessionForwarder;
+
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_SocketSessionForwarder : public PyObject {
+public:
+ s_SocketSessionForwarder();
+ SocketSessionForwarder* cppobj;
+};
+
+extern PyTypeObject socketsessionforwarder_type;
+
+bool initModulePart_SocketSessionForwarder(PyObject* mod);
+} // namespace python
+} // namespace io
+} // namespace util
+} // namespace isc
+#endif // __PYTHON_SOCKETSESSIONFORWARDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/util/cio/socketsessionreceiver_inc.cc b/src/lib/python/isc/util/cio/socketsessionreceiver_inc.cc
new file mode 100644
index 0000000..ed29d3e
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsessionreceiver_inc.cc
@@ -0,0 +1,89 @@
+namespace {
+// Modifications
+// - about return value
+// - socket session "utility" => module
+const char* const SocketSessionReceiver_doc = "\
+The receiver of socket sessions.\n\
+\n\
+An object of this class holds a UNIX domain socket for an established\n\
+connection, receives socket sessions from the remote forwarder, and\n\
+provides the session to the application as a tuple of corresponding\n\
+elements.\n\
+\n\
+Note that this class is instantiated with an already connected socket;\n\
+it's not a listening socket that is accepting connection requests from\n\
+forwarders. It's the application's responsibility to create the listening\n\
+socket, listen on it, and accept connections. Once the connection is\n\
+established, the application would construct a SocketSessionReceiver\n\
+object with the socket for the newly established connection. This\n\
+behavior is based on the design decision that the application should\n\
+decide when it performs (possibly) blocking operations (see\n\
+socketsession module for more details).\n\
+\n\
+See the description of socketsession module for other details of how\n\
+the session forwarding works.\n\
+\n\
+SocketSessionReceiver(socket)\n\
+\n\
+ The constructor.\n\
+\n\
+ Exceptions:\n\
+ TypeError The given parameter is not a valid socket object\n\
+ SocketSessionError Any error on an operation that is performed\n\
+ on the given socket as part of initialization.\n\
+ SystemError Unexpected errors such as resource allocation failure\n\
+\n\
+ Parameters:\n\
+ socket A python socket object of a UNIX domain family for an\n\
+ established connection with a forwarder.\n\
+\n\
+";
+
+// Modifications
+// - socket session utility -> module
+// - return value (not a SocketSession object, but a Python tuple)
+// - remove the validity note (we copy it here, so there's no such
+// restriction)
+// - caller's responsibility: only responsible for closing the socket.
+// - text around the bullets
+// - exception
+const char* const SocketSessionReceiver_pop_doc = "\
+pop() -> (socket, socket address, socket address, byte)\n\
+\n\
+Receive a socket session from the forwarder.\n\
+\n\
+This method receives wire-format data (see socketsession module) for\n\
+a socket session on the UNIX domain socket, performs some validation\n\
+on the data, and returns the session information as a tuple.\n\
+\n\
+The caller is responsible for closing the received socket.\n\
+\n\
+It ensures the following:\n\
+\n\
+- The socket's address family is either AF_INET or AF_INET6\n\
+- The family element of the socket addresses for the local and remote\n\
+ end points must be equal to the socket's address family\n\
+- The socket session data is not empty and does not exceed 65535\n\
+ bytes.\n\
+\n\
+If the validation fails or an unexpected system error happens\n\
+(including a connection close in the middle of reception), it throws\n\
+a SocketSessionError exception. When this happens, it's very\n\
+unlikely that a subsequent call to this method succeeds, so in\n\
+practice the application is expected to destroy the receiver and close the\n\
+socket in such a case.\n\
+\n\
+Exceptions:\n\
+ SocketSessionError Invalid data is received or a system error on\n\
+ socket operation happens.\n\
+ SystemError Unexpected errors such as resource allocation failure\n\
+\n\
+Return Value(s): A tuple corresponding to the extracted socket session:\n\
+ socket A Python socket object corresponding to the socket passed\n\
+ by the forwarder\n\
+ socket address A Python socket address (which is a tuple) for the local\n\
+ end point\n\
+ socket address A Python socket address for the remote endpoint\n\
+ data A Python byte object that stores the session data\n\
+";
+} // unnamed namespace
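
For symmetry with the forwarder example, a hedged sketch of the receiving
side. As the class description notes, the application owns the listening
UNIX domain socket; the path is a placeholder and the import path follows
the in-tree module layout.

    import socket
    from isc.util.cio.socketsession import SocketSessionReceiver

    listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    listener.bind('/tmp/receiver.unix')    # hypothetical path
    listener.listen(1)

    conn, _ = listener.accept()
    receiver = SocketSessionReceiver(conn)
    (fwd_sock, local_end, remote_end, data) = receiver.pop()
    # ... use the session; the caller is responsible for closing the socket.
    fwd_sock.close()
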
diff --git a/src/lib/python/isc/util/cio/socketsessionreceiver_python.cc b/src/lib/python/isc/util/cio/socketsessionreceiver_python.cc
new file mode 100644
index 0000000..c79f6e0
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsessionreceiver_python.cc
@@ -0,0 +1,327 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netdb.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <boost/lexical_cast.hpp>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <util/io/socketsession.h>
+
+#include "socketsession_python.h"
+#include "socketsessionreceiver_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::util::io;
+using namespace isc::util::io::python;
+using boost::lexical_cast;
+
+// Trivial constructor.
+s_SocketSessionReceiver::s_SocketSessionReceiver() : cppobj(NULL) {
+}
+
+// Import pydoc text
+#include "socketsessionreceiver_inc.cc"
+
+namespace {
+// This C structure corresponds to a Python callable object for
+// socket.fromfd().
+// See json_dumps_obj in dns_requestloader_python.cc for background rationale
+// of this trick.
+PyObject* socket_fromfd_obj = NULL;
+
+int
+SocketSessionReceiver_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_SocketSessionReceiver* self =
+ static_cast<s_SocketSessionReceiver*>(po_self);
+ try {
+ // The constructor expects a Python socket object. We'll extract
+ // the underlying file descriptor using the fileno method (in the
+ // duck typing manner) and pass it to the C++ constructor.
+ // PyObject_CallMethod() could return NULL (especially if the given
+ // object is of the wrong type and doesn't have the "fileno" method),
+ // in which case PyObjectContainer will detect it and throw
+ // PyCPPWrapperException, which will be converted to the Python
+ // TypeError below.
+ PyObject* po_sock;
+ if (PyArg_ParseTuple(args, "O", &po_sock)) {
+ PyObjectContainer fd_container(PyObject_CallMethod(
+ po_sock,
+ const_cast<char*>("fileno"),
+ NULL));
+ PyObjectContainer fdarg_container(
+ Py_BuildValue("(O)", fd_container.get()));
+ int fd;
+ if (PyArg_ParseTuple(fdarg_container.get(), "i", &fd)) {
+ self->cppobj = new SocketSessionReceiver(fd);
+ return (0);
+ }
+ PyErr_SetString(PyExc_TypeError, "Given object's fileno() doesn't "
+ "return an integer, probably not a valid socket "
+ "object");
+ }
+ } catch (const PyCPPWrapperException& ex) {
+ // This could happen due to memory allocation failure, but it's more
+ // likely that the object doesn't have the "fileno()" method or it
+ // returns an unexpected type of value. So we adjust the error
+ // message accordingly.
+ PyErr_SetString(PyExc_TypeError, "Failed to parse parameter, "
+ "probably not a valid socket object");
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to construct SocketSessionReceiver object: " +
+ string(ex.what());
+ PyErr_SetString(po_SocketSessionError, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ }
+
+ return (-1);
+}
+
+PyObject*
+createPySocketAddress(const struct sockaddr& sa) {
+ socklen_t salen;
+ if (sa.sa_family == AF_INET) {
+ salen = sizeof(struct sockaddr_in);
+ } else if (sa.sa_family == AF_INET6) {
+ salen = sizeof(struct sockaddr_in6);
+ } else {
+ isc_throw(SocketSessionError, "Unsupported socket address family: "
+ << static_cast<int>(sa.sa_family));
+ }
+
+ char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
+ const int error = getnameinfo(&sa, salen, hbuf, sizeof(hbuf), sbuf,
+ sizeof(sbuf),
+ NI_NUMERICHOST | NI_NUMERICSERV);
+ if (error != 0) {
+ isc_throw(SocketSessionError, "Unrecognized socket address format: "
+ << gai_strerror(error));
+ }
+ if (sa.sa_family == AF_INET) {
+ return (Py_BuildValue("(si)", hbuf, lexical_cast<int>(sbuf)));
+ }
+ // We know it's AF_INET6 at this point. We need some special trick for
+ // non-0 scope (zone) ID: getnameinfo() may convert the address to a
+ // textual representation using the extension described in RFC 4007,
+ // in which case it contains a delimiter character '%'. We need to remove
+ // it before constructing the tuple. The scope (zone) ID is preserved
+ // in the corresponding field of the tuple.
+ const void* p = &sa;
+ const struct sockaddr_in6* sin6 =
+ static_cast<const struct sockaddr_in6*>(p);
+ char* cp = strchr(hbuf, '%');
+ if (cp != NULL) {
+ *cp = '\0';
+ }
+ return (Py_BuildValue("(siii)", hbuf, lexical_cast<int>(sbuf), 0,
+ sin6->sin6_scope_id));
+}
+
+void
+SocketSessionReceiver_destroy(PyObject* po_self) {
+ s_SocketSessionReceiver* self =
+ static_cast<s_SocketSessionReceiver*>(po_self);
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// A helper struct to automatically close a socket in an RAII manner.
+struct ScopedSocket : boost::noncopyable {
+ ScopedSocket(int fd) : fd_(fd) {}
+ ~ScopedSocket() {
+ close(fd_);
+ }
+ const int fd_;
+};
+
+PyObject*
+SocketSessionReceiver_pop(PyObject* po_self, PyObject*) {
+ s_SocketSessionReceiver* const self =
+ static_cast<s_SocketSessionReceiver*>(po_self);
+
+ try {
+        // Retrieve the session, and then convert it to a corresponding
+ // Python tuple.
+ const SocketSession session = self->cppobj->pop();
+
+ // We need to immediately store the socket file descriptor in a
+ // ScopedSocket object. socket.fromfd() will dup() the FD, so we need
+ // to close our copy even if an exception is thrown.
+ ScopedSocket sock(session.getSocket());
+
+ // Build Python socket object
+ PyObjectContainer c_args(Py_BuildValue("(iiii)", sock.fd_,
+ session.getFamily(),
+ session.getType(),
+ session.getProtocol()));
+ PyObjectContainer c_sock(PyObject_CallObject(socket_fromfd_obj,
+ c_args.get()));
+ // Convert the local and remote sockaddr to Python socket address objs
+ PyObjectContainer c_local(createPySocketAddress(
+ session.getLocalEndpoint()));
+ PyObjectContainer c_remote(createPySocketAddress(
+ session.getRemoteEndpoint()));
+ // Convert the session data to Python byte object.
+ PyObjectContainer c_data(Py_BuildValue("y#", session.getData(),
+ session.getDataLength()));
+
+ // Build a tuple from them and return it.
+ return (Py_BuildValue("(OOOO)", c_sock.get(), c_local.get(),
+ c_remote.get(), c_data.get()));
+ } catch (const SocketSessionError& ex) {
+ PyErr_SetString(po_SocketSessionError, ex.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure in receiving a socket session: " +
+ string(ex.what());
+ PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ return (NULL);
+ }
+}
+
+// These are the functions we export
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef SocketSessionReceiver_methods[] = {
+ { "pop", SocketSessionReceiver_pop, METH_NOARGS,
+ SocketSessionReceiver_pop_doc },
+ { NULL, NULL, 0, NULL }
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace util {
+namespace io {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_SocketSessionReceiver
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject socketsessionreceiver_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.util.cio.SocketSessionReceiver",
+ sizeof(s_SocketSessionReceiver), // tp_basicsize
+ 0, // tp_itemsize
+ SocketSessionReceiver_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ SocketSessionReceiver_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ SocketSessionReceiver_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ SocketSessionReceiver_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+// Module Initialization, all statics are initialized here
+bool
+initModulePart_SocketSessionReceiver(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&socketsessionreceiver_type) < 0) {
+ return (false);
+ }
+ void* p = &socketsessionreceiver_type;
+ if (PyModule_AddObject(mod, "SocketSessionReceiver",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+
+ PyObject* socket_module = PyImport_AddModule("socket");
+ if (socket_module != NULL) {
+ PyObject* socket_dict = PyModule_GetDict(socket_module);
+ if (socket_dict != NULL) {
+ socket_fromfd_obj = PyDict_GetItemString(socket_dict, "fromfd");
+ }
+ }
+ if (socket_fromfd_obj != NULL) {
+ Py_INCREF(socket_fromfd_obj);
+ } else {
+ PyErr_SetString(PyExc_RuntimeError,
+ "isc.util.cio.SocketSessionReceiver needs "
+ "socket.fromfd(), but it's missing");
+ return (false);
+ }
+
+ Py_INCREF(&socketsessionreceiver_type);
+
+ return (true);
+}
+
+} // namespace python
+} // namespace io
+} // namespace util
+} // namespace isc
diff --git a/src/lib/python/isc/util/cio/socketsessionreceiver_python.h b/src/lib/python/isc/util/cio/socketsessionreceiver_python.h
new file mode 100644
index 0000000..14e8a1b
--- /dev/null
+++ b/src/lib/python/isc/util/cio/socketsessionreceiver_python.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SOCKETSESSIONRECEIVER_H
+#define __PYTHON_SOCKETSESSIONRECEIVER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace util {
+namespace io {
+class SocketSessionReceiver;
+
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_SocketSessionReceiver : public PyObject {
+public:
+ s_SocketSessionReceiver();
+ SocketSessionReceiver* cppobj;
+};
+
+extern PyTypeObject socketsessionreceiver_type;
+
+bool initModulePart_SocketSessionReceiver(PyObject* mod);
+
+} // namespace python
+} // namespace io
+} // namespace util
+} // namespace isc
+#endif // __PYTHON_SOCKETSESSIONRECEIVER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/util/cio/tests/Makefile.am b/src/lib/python/isc/util/cio/tests/Makefile.am
new file mode 100644
index 0000000..3429009
--- /dev/null
+++ b/src/lib/python/isc/util/cio/tests/Makefile.am
@@ -0,0 +1,36 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = socketsession_test.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+# Note: below we intentionally use a non-absolute path for TESTDATAOBJDIR.
+# It will be used as part of the path of a UNIX domain socket. Due to the
+# relatively low limit on that path's length, it's better to keep it as
+# short as possible.
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/isc/python/util/io/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ TESTDATAOBJDIR=$(builddir) \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
+CLEANFILES = $(builddir)/ssessiontest.unix
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/util/cio/tests/socketsession_test.py b/src/lib/python/isc/util/cio/tests/socketsession_test.py
new file mode 100644
index 0000000..d492f6d
--- /dev/null
+++ b/src/lib/python/isc/util/cio/tests/socketsession_test.py
@@ -0,0 +1,267 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os, signal, socket, unittest
+from socket import AF_INET, AF_INET6, SOCK_STREAM, SOCK_DGRAM, IPPROTO_UDP, \
+ IPPROTO_TCP
+from isc.util.cio.socketsession import *
+
+TESTDATA_OBJDIR = os.getenv("TESTDATAOBJDIR")
+TEST_UNIX_FILE = TESTDATA_OBJDIR + '/ssessiontest.unix'
+TEST_DATA = b'BIND10 test'
+TEST_PORT = 53535
+TEST_PORT2 = 53536
+TEST_PORT3 = 53537
+
+class TestForwarder(unittest.TestCase):
+ '''In general, this is a straightforward port of the C++ counterpart.
+
+ Some test cases are simplified, and some include Python-specific cases.
+
+ '''
+
+ def setUp(self):
+ self.listen_sock = None
+ self.forwarder = SocketSessionForwarder(TEST_UNIX_FILE)
+ if os.path.exists(TEST_UNIX_FILE):
+ os.unlink(TEST_UNIX_FILE)
+ self.large_text = b'a' * 65535
+
+ def tearDown(self):
+ if self.listen_sock is not None:
+ self.listen_sock.close()
+ if os.path.exists(TEST_UNIX_FILE):
+ os.unlink(TEST_UNIX_FILE)
+
+ def start_listen(self):
+ self.listen_sock = socket.socket(socket.AF_UNIX, SOCK_STREAM, 0)
+ self.listen_sock.bind(TEST_UNIX_FILE)
+ self.listen_sock.listen(10)
+
+ def accept_forwarder(self):
+ self.listen_sock.setblocking(False)
+ s, _ = self.listen_sock.accept()
+ s.setblocking(True)
+ return s
+
+ def test_init(self):
+ # Check bad arguments. Valid cases will be covered in other tests.
+ self.assertRaises(TypeError, SocketSessionForwarder, 1)
+ self.assertRaises(TypeError, SocketSessionForwarder,
+ 'test.unix', 'test.unix')
+
+ def test_badpush(self):
+ # bad numbers of parameters
+ self.assertRaises(TypeError, self.forwarder.push, 1)
+ self.assertRaises(TypeError, self.forwarder.push, 0, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('127.0.0.1', 53),
+ ('192.0.2.1', 5300), TEST_DATA, 0)
+ # contain a bad type of parameter
+ self.assertRaises(TypeError, self.forwarder.push, 0, 'AF_INET',
+ SOCK_DGRAM, IPPROTO_UDP, ('127.0.0.1', 53),
+ ('192.0.2.1', 5300), TEST_DATA)
+ # bad local address
+ self.assertRaises(TypeError, self.forwarder.push, 0, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('127.0.0..1', 53),
+ ('192.0.2.1', 5300), TEST_DATA)
+ self.assertRaises(TypeError, self.forwarder.push, 0, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, '127.0.0.1',
+ ('192.0.2.1', 5300), TEST_DATA)
+ # bad remote address
+ self.assertRaises(TypeError, self.forwarder.push, 0, AF_INET6,
+ SOCK_DGRAM, IPPROTO_UDP, ('2001:db8::1', 53),
+ ('2001:db8:::3', 5300), TEST_DATA)
+
+ # push before connect
+ self.assertRaises(TypeError, self.forwarder.push, 0, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('192.0.2.1', 53),
+ ('192.0.2.2', 53), TEST_DATA)
+
+ # Now connect the forwarder for the rest of tests
+ self.start_listen()
+ self.forwarder.connect_to_receiver()
+
+ # Inconsistent address family
+ self.assertRaises(TypeError, self.forwarder.push, 1, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('2001:db8::1', 53, 0, 1),
+ ('192.0.2.2', 53), TEST_DATA)
+ self.assertRaises(TypeError, self.forwarder.push, 1, AF_INET6,
+ SOCK_DGRAM, IPPROTO_UDP, ('2001:db8::1', 53, 0, 1),
+ ('192.0.2.2', 53), TEST_DATA)
+
+ # Empty data: we reject them at least for now
+ self.assertRaises(TypeError, self.forwarder.push, 1, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('192.0.2.1', 53),
+ ('192.0.2.2', 53), b'')
+
+ # Too big data: we reject them at least for now
+ self.assertRaises(TypeError, self.forwarder.push, 1, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('192.0.2.1', 53),
+ ('192.0.2.2', 53), b'd' * 65536)
+
+ # Close the receptor before push. It will result in SIGPIPE (should be
+ # ignored) and EPIPE, which will be converted to SocketSessionError.
+ self.listen_sock.close()
+ self.assertRaises(SocketSessionError, self.forwarder.push, 1, AF_INET,
+ SOCK_DGRAM, IPPROTO_UDP, ('192.0.2.1', 53),
+ ('192.0.2.2', 53), TEST_DATA)
+
+ def create_socket(self, family, type, protocol, addr, do_listen):
+ s = socket.socket(family, type, protocol)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.bind(addr)
+ if do_listen and protocol == IPPROTO_TCP:
+ s.listen(1)
+ return s
+
+ def check_push_and_pop(self, family, type, protocol, local, remote,
+ data, new_connection):
+ sock = self.create_socket(family, type, protocol, local, True)
+ fwd_fd = sock.fileno()
+ if protocol == IPPROTO_TCP:
+ client_addr = ('::1', 0, 0, 0) if family == AF_INET6 \
+ else ('127.0.0.1', 0)
+ client_sock = self.create_socket(family, type, protocol,
+ client_addr, False)
+ client_sock.setblocking(False)
+ try:
+ client_sock.connect(local)
+ except socket.error:
+ pass
+ server_sock, _ = sock.accept()
+ fwd_fd = server_sock.fileno()
+
+ # If a new connection is required, start the "server", have the
+ # internal forwarder connect to it, and then internally accept it.
+ if new_connection:
+ self.start_listen()
+ self.forwarder.connect_to_receiver()
+ self.accept_sock = self.accept_forwarder()
+
+ # Then push one socket session via the forwarder.
+ self.forwarder.push(fwd_fd, family, type, protocol, local, remote,
+ data)
+
+ # Pop the socket session we just pushed from a local receiver, and
+ # check the content.
+ receiver = SocketSessionReceiver(self.accept_sock)
+ signal.alarm(1)
+ sock_session = receiver.pop()
+ signal.alarm(0)
+ passed_sock = sock_session[0]
+ self.assertNotEqual(fwd_fd, passed_sock.fileno())
+ self.assertEqual(family, passed_sock.family)
+ self.assertEqual(type, passed_sock.type)
+ self.assertEqual(protocol, passed_sock.proto)
+ self.assertEqual(local, sock_session[1])
+ self.assertEqual(remote, sock_session[2])
+ self.assertEqual(data, sock_session[3])
+
+ # Check if the passed FD is usable by sending some data from it.
+ passed_sock.setblocking(True)
+ if protocol == IPPROTO_UDP:
+ self.assertEqual(len(TEST_DATA), passed_sock.sendto(TEST_DATA,
+ local))
+ sock.settimeout(10)
+ self.assertEqual(TEST_DATA, sock.recvfrom(len(TEST_DATA))[0])
+ else:
+ self.assertEqual(len(TEST_DATA), passed_sock.send(TEST_DATA))
+ client_sock.setblocking(True)
+ client_sock.settimeout(10)
+ self.assertEqual(TEST_DATA, client_sock.recv(len(TEST_DATA)))
+ server_sock.close()
+ client_sock.close()
+
+ passed_sock.close()
+ sock.close()
+
+ def test_push_and_pop(self):
+ # This is a straightforward port of the C++ pushAndPop test. See the
+ # C++ version for why we use multiple ports for "local".
+ local6 = ('::1', TEST_PORT, 0, 0)
+ local6_alt = ('::1', TEST_PORT2, 0, 0)
+ local6_alt2 = ('::1', TEST_PORT3, 0, 0)
+ remote6 = ('2001:db8::1', 5300, 0, 0)
+ self.check_push_and_pop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP,
+ local6, remote6, TEST_DATA, True)
+ self.check_push_and_pop(AF_INET6, SOCK_STREAM, IPPROTO_TCP,
+ local6, remote6, TEST_DATA, False)
+
+ local4 = ('127.0.0.1', TEST_PORT)
+ local4_alt = ('127.0.0.1', TEST_PORT2)
+ remote4 = ('192.0.2.2', 5300)
+ self.check_push_and_pop(AF_INET, SOCK_DGRAM, IPPROTO_UDP,
+ local4, remote4, TEST_DATA, False)
+ self.check_push_and_pop(AF_INET, SOCK_STREAM, IPPROTO_TCP,
+ local4, remote4, TEST_DATA, False)
+
+ self.check_push_and_pop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP,
+ local6_alt, remote6, self.large_text, False)
+ self.check_push_and_pop(AF_INET6, SOCK_STREAM, IPPROTO_TCP,
+ local6, remote6, self.large_text, False)
+ self.check_push_and_pop(AF_INET, SOCK_DGRAM, IPPROTO_UDP,
+ local4_alt, remote4, self.large_text, False)
+ self.check_push_and_pop(AF_INET, SOCK_STREAM, IPPROTO_TCP,
+ local4, remote4, self.large_text, False)
+
+ # Python specific: check for an IPv6 scoped address with non 0
+ # scope (zone) ID
+ scope6 = ('fe80::1', TEST_PORT, 0, 1)
+ self.check_push_and_pop(AF_INET6, SOCK_DGRAM, IPPROTO_UDP,
+ local6_alt2, scope6, TEST_DATA, False)
+
+ def test_push_too_fast(self):
+ # A straightforward port of C++ pushTooFast test.
+ def multi_push(forwarder, addr, data):
+ for i in range(0, 10):
+ forwarder.push(1, AF_INET, SOCK_DGRAM, IPPROTO_UDP, addr,
+ addr, data)
+ self.start_listen()
+ self.forwarder.connect_to_receiver()
+ self.assertRaises(SocketSessionError, multi_push, self.forwarder,
+ ('192.0.2.1', 53), self.large_text)
+
+ def test_bad_pop(self):
+ # This is a subset of C++ badPop test. We only check pop() raises
+ # SocketSessionError when it internally fails to get the FD.
+ # Other cases would require passing a valid FD from the test,
+ # which would make the test too complicated. As this is just a wrapper,
+ # checking one common failure case should be reasonably sufficient.
+
+ self.start_listen()
+ s = socket.socket(socket.AF_UNIX, SOCK_STREAM, 0)
+ s.setblocking(False)
+ s.connect(TEST_UNIX_FILE)
+ accept_sock = self.accept_forwarder()
+ receiver = SocketSessionReceiver(accept_sock)
+ s.close()
+ self.assertRaises(SocketSessionError, receiver.pop)
+ accept_sock.close()
+
+class TestReceiver(unittest.TestCase):
+ # We only check a couple of failure cases on construction. Valid cases
+ # are covered in TestForwarder.
+
+ def test_bad_init(self):
+ class FakeSocket:
+ # Pretending to be the standard socket class, but its fileno() is
+ # bogus.
+ def fileno(self):
+ return None
+ self.assertRaises(TypeError, SocketSessionReceiver, 1)
+ self.assertRaises(TypeError, SocketSessionReceiver, FakeSocket())
+
+if __name__ == '__main__':
+ unittest.main()
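
The forwarder-side flow exercised by the test above can be condensed into the
following sketch. It only restates calls already made in the test; the socket,
addresses, data and the UNIX domain socket path are placeholder values.

    # Sketch of the forwarder side, assuming a receiver process is already
    # listening on the given UNIX domain socket path (placeholder values).
    import socket
    from socket import AF_INET, SOCK_DGRAM, IPPROTO_UDP
    from isc.util.cio.socketsession import SocketSessionForwarder

    udp_sock = socket.socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
    udp_sock.bind(('127.0.0.1', 53535))

    forwarder = SocketSessionForwarder('/path/to/receiver.unix')
    forwarder.connect_to_receiver()
    # Push the socket (by file descriptor) along with the session's
    # end points and the initial data.
    forwarder.push(udp_sock.fileno(), AF_INET, SOCK_DGRAM, IPPROTO_UDP,
                   ('127.0.0.1', 53535), ('192.0.2.1', 5300), b'BIND10 test')
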
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index a2d9a7d..ea51967 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -15,15 +15,18 @@
"""
This helps the XFR in process with accumulating parts of diff and applying
-it to the datasource.
+it to the datasource. It also has a 'single update mode' which is useful
+for DDNS.
The name of the module is not yet fully decided. We might want to move it
-under isc.datasrc or somewhere else, because we might want to reuse it with
-future DDNS process. But until then, it lives here.
+under isc.datasrc or somewhere else, because we are reusing it with DDNS.
+But for now, it lives here.
"""
import isc.dns
+from isc.datasrc import ZoneFinder
import isc.log
+from isc.datasrc import ZoneFinder
from isc.log_messages.libxfrin_messages import *
class NoSuchZone(Exception):
@@ -59,7 +62,8 @@ class Diff:
the changes to underlying data source right away, but keeps them for
a while.
"""
- def __init__(self, ds_client, zone, replace=False):
+ def __init__(self, ds_client, zone, replace=False, journaling=False,
+ single_update_mode=False):
"""
Initializes the diff to a ready state. It checks the zone exists
in the datasource and if not, NoSuchZone is raised. This also creates
@@ -67,21 +71,57 @@ class Diff:
The ds_client is the datasource client containing the zone. Zone is
isc.dns.Name object representing the name of the zone (its apex).
- If replace is true, the content of the whole zone is wiped out before
+ If replace is True, the content of the whole zone is wiped out before
applying the diff.
+ If journaling is True, the history of subsequent updates will be
+ recorded as well as the updates themselves, as long as the underlying
+ data source supports journaling. If the data source allows
+ incoming updates but does not support journaling, the Diff object
+ will still continue applying the diffs, but with journaling disabled.
+
+ If single_update_mode is true, the update is expected to only contain
+ 1 set of changes (i.e. one set of additions, and one set of deletions).
+ If so, the additions and deletions are kept separately, and applied
+ in one go upon commit() or apply(). In this mode, additions and
+ deletions can be done in any order. The first addition and the
+ first deletion still have to be the new and old SOA records,
+ respectively. Once apply() or commit() has been called, this
+ requirement is renewed (since the diff object is essentially reset).
+
+ In this single_update_mode, upon commit, the deletions are performed
+ first, and then the additions. With the previously mentioned
+ restrictions, this means that the actual update looks like a single
+ IXFR changeset (which can then be journaled). Apart from those
+ restrictions, this class does not do any checking of data; it is
+ the caller's responsibility to keep the data 'sane', and this class
+ does not presume to have any knowledge of DNS zone content sanity.
+ For instance, though it enforces the SOA to be deleted first, and
+ added first, it does no checks on the SERIAL value.
+
You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
exceptions.
"""
- self.__updater = ds_client.get_updater(zone, replace)
+ try:
+ self.__updater = ds_client.get_updater(zone, replace, journaling)
+ except isc.datasrc.NotImplemented as ex:
+ if not journaling:
+ raise ex
+ self.__updater = ds_client.get_updater(zone, replace, False)
+ logger.info(LIBXFRIN_NO_JOURNAL, zone, ds_client)
if self.__updater is None:
# The no such zone case
raise NoSuchZone("Zone " + str(zone) +
" does not exist in the data source " +
str(ds_client))
- self.__buffer = []
-
- def __check_commited(self):
+ self.__single_update_mode = single_update_mode
+ if single_update_mode:
+ self.__additions = []
+ self.__deletions = []
+ else:
+ self.__buffer = []
+
+ def __check_committed(self):
"""
This checks if the diff is already commited or broken. If it is, it
raises ValueError. This check is for methods that need to work only on
@@ -91,14 +131,47 @@ class Diff:
raise ValueError("The diff is already commited or it has raised " +
"an exception, you come late")
+ def __append_with_soa_check(self, buf, operation, rr):
+ """
+ Helper method for __data_common().
+ Add the given rr to the given buffer, but with a SOA check;
+ - if the buffer is empty, the RRType of the rr must be SOA
+ - if the buffer is not empty, the RRType must not be SOA
+ Raises a ValueError if these rules are not satisfied.
+ If they are, the RR is appended to the buffer.
+ Arguments:
+ buf: buffer to add to
+ operation: operation to perform (either 'add' or 'delete')
+ rr: RRset to add to the buffer
+ """
+ # first add or delete must be of type SOA
+ if len(buf) == 0 and\
+ rr.get_type() != isc.dns.RRType.SOA():
+ raise ValueError("First " + operation +
+ " in single update mode must be of type SOA")
+ # And later adds or deletes may not
+ elif len(buf) != 0 and\
+ rr.get_type() == isc.dns.RRType.SOA():
+ raise ValueError("Multiple SOA records in single " +
+ "update mode " + operation)
+ buf.append((operation, rr))
+
def __data_common(self, rr, operation):
"""
Schedules an operation with rr.
It does all the real work of add_data and delete_data, including
all checks.
+
+ Raises a ValueError in several cases:
+ - if the rrset contains multiple rrs
+ - if the class of the rrset does not match that of the update
+ - in single_update_mode if the first rr is not of type SOA (both
+ for addition and deletion)
+ - in single_update_mode if any later rr is of type SOA (both for
+ addition and deletion)
"""
- self.__check_commited()
+ self.__check_committed()
if rr.get_rdata_count() != 1:
raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
'it holds ' + str(rr.get_rdata_count()))
@@ -106,10 +179,21 @@ class Diff:
raise ValueError("The rrset's class " + str(rr.get_class()) +
" does not match updater's " +
str(self.__updater.get_class()))
- self.__buffer.append((operation, rr))
- if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
- # Time to auto-apply, so the data don't accumulate too much
- self.apply()
+ if self.__single_update_mode:
+ if operation == 'add':
+ if not self._remove_rr_from_deletions(rr):
+ self.__append_with_soa_check(self.__additions, operation,
+ rr)
+ elif operation == 'delete':
+ if not self._remove_rr_from_additions(rr):
+ self.__append_with_soa_check(self.__deletions, operation,
+ rr)
+ else:
+ self.__buffer.append((operation, rr))
+ if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
+ # Time to auto-apply, so the data don't accumulate too much
+ # This is not done for DDNS type data
+ self.apply()
def add_data(self, rr):
"""
@@ -150,22 +234,47 @@ class Diff:
and do more merging, but such diffs should be rare in practice anyway,
so we don't bother and do it this simple way.
"""
- buf = []
- for (op, rrset) in self.__buffer:
- old = buf[-1][1] if len(buf) > 0 else None
- if old is None or op != buf[-1][0] or \
- rrset.get_name() != old.get_name() or \
- rrset.get_type() != old.get_type():
- buf.append((op, isc.dns.RRset(rrset.get_name(),
- rrset.get_class(),
- rrset.get_type(),
- rrset.get_ttl())))
- if rrset.get_ttl() != buf[-1][1].get_ttl():
- logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(),
- buf[-1][1].get_ttl())
- for rdatum in rrset.get_rdata():
- buf[-1][1].add_rdata(rdatum)
- self.__buffer = buf
+ def same_type(rrset1, rrset2):
+ '''A helper routine to identify whether two RRsets are of the
+ same 'type'. For RRSIGs we should consider type covered, too.
+ '''
+ if rrset1.get_type() != isc.dns.RRType.RRSIG() or \
+ rrset2.get_type() != isc.dns.RRType.RRSIG():
+ return rrset1.get_type() == rrset2.get_type()
+ # RR type of the both RRsets is RRSIG. Compare type covered.
+ # We know they have exactly one RDATA.
+ sigdata1 = rrset1.get_rdata()[0].to_text().split()[0]
+ sigdata2 = rrset2.get_rdata()[0].to_text().split()[0]
+ return sigdata1 == sigdata2
+
+ def compact_buffer(buffer_to_compact):
+ '''Internal helper function for compacting buffers, compacts the
+ given buffer.
+ Returns the compacted buffer.
+ '''
+ buf = []
+ for (op, rrset) in buffer_to_compact:
+ old = buf[-1][1] if len(buf) > 0 else None
+ if old is None or op != buf[-1][0] or \
+ rrset.get_name() != old.get_name() or \
+ (not same_type(rrset, old)):
+ buf.append((op, isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())))
+ if rrset.get_ttl() != buf[-1][1].get_ttl():
+ logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(),
+ buf[-1][1].get_ttl(), rrset.get_name(),
+ rrset.get_class(), rrset.get_type())
+ for rdatum in rrset.get_rdata():
+ buf[-1][1].add_rdata(rdatum)
+ return buf
+
+ if self.__single_update_mode:
+ self.__additions = compact_buffer(self.__additions)
+ self.__deletions = compact_buffer(self.__deletions)
+ else:
+ self.__buffer = compact_buffer(self.__buffer)
def apply(self):
"""
@@ -183,25 +292,41 @@ class Diff:
It also can raise isc.datasrc.Error. If that happens, you should stop
using this object and abort the modification.
"""
- self.__check_commited()
- # First, compact the data
- self.compact()
- try:
- # Then pass the data inside the data source
- for (operation, rrset) in self.__buffer:
+ def apply_buffer(buf):
+ '''
+ Helper method to apply all operations in the given buffer
+ '''
+ for (operation, rrset) in buf:
if operation == 'add':
self.__updater.add_rrset(rrset)
elif operation == 'delete':
self.__updater.delete_rrset(rrset)
else:
raise ValueError('Unknown operation ' + operation)
+
+ self.__check_committed()
+ # First, compact the data
+ self.compact()
+ try:
+ # Then pass the data inside the data source
+ if self.__single_update_mode:
+ apply_buffer(self.__deletions)
+ apply_buffer(self.__additions)
+ else:
+ apply_buffer(self.__buffer)
+
# As everything is already in, drop the buffer
except:
# If there's a problem, we can't continue.
self.__updater = None
raise
- self.__buffer = []
+ # all went well, reset state of buffers
+ if self.__single_update_mode:
+ self.__additions = []
+ self.__deletions = []
+ else:
+ self.__buffer = []
def commit(self):
"""
@@ -211,7 +336,7 @@ class Diff:
This might raise isc.datasrc.Error.
"""
- self.__check_commited()
+ self.__check_committed()
# Push the data inside the data source
self.apply()
# Make sure they are visible.
@@ -233,5 +358,229 @@ class Diff:
Probably useful only for testing and introspection purposes. Don't
modify the list.
+
+ Raises a ValueError if the buffer is in single_update_mode.
+ """
+ if self.__single_update_mode:
+ raise ValueError("Compound buffer requested in single-update mode")
+ else:
+ return self.__buffer
+
+ def get_single_update_buffers(self):
+ """
+ Returns the current buffers of changes not yet passed into the data
+ source. It is a tuple of the current deletions and additions, which
+ each are in a form like [('delete', rrset), ('delete', rrset), ...],
+ and [('add', rrset), ('add', rrset), ..].
+
+ Probably useful only for testing and introspection purposes. Don't
+ modify the lists.
+
+ Raises a ValueError if the buffer is not in single_update_mode.
+ """
+ if not self.__single_update_mode:
+ raise ValueError("Separate buffers requested outside single-update mode")
+ else:
+ return (self.__deletions, self.__additions)
+
+ def find(self, name, rrtype,
+ options=(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK)):
+ """
+ Calls the find() method in the ZoneFinder associated with this
+ Diff's ZoneUpdater, i.e. the find() on the zone as it was at the
+ moment this Diff object got created.
+ See the ZoneFinder documentation for a full description.
+ Note that the result does not include changes made in this Diff
+ instance so far.
+ Options default to NO_WILDCARD and FIND_GLUE_OK.
+ Raises a ValueError if the Diff has been committed already
+ """
+ self.__check_committed()
+ return self.__updater.find(name, rrtype, options)
+
+ def find_all(self, name,
+ options=(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK)):
+ """
+ Calls the find_all() method in the ZoneFinder associated with this
+ Diff's ZoneUpdater, i.e. the find_all() on the zone as it was at the
+ moment this Diff object got created.
+ See the ZoneFinder documentation for a full description.
+ Note that the result does not include changes made in this Diff
+ instance so far.
+ Options default to NO_WILDCARD and FIND_GLUE_OK.
+ Raises a ValueError if the Diff has been committed already
"""
- return self.__buffer
+ self.__check_committed()
+ return self.__updater.find_all(name, options)
+
+ def __remove_rr_from_buffer(self, buf, rr):
+ '''Helper for common code in remove_rr_from_deletions() and
+ remove_rr_from_additions();
+ returns the result of the removal operation on the given buffer
+ '''
+ def same_rr(a, b):
+ # Consider two rr's the same if name, type, and rdata match
+ # Note that at this point it should have been checked that
+ # the rr in the buffer and the given rr have exactly one rdata
+ return a.get_name() == b.get_name() and\
+ a.get_type() == b.get_type() and\
+ a.get_rdata()[0] == b.get_rdata()[0]
+ if rr.get_type() == isc.dns.RRType.SOA():
+ return buf
+ else:
+ return [ op for op in buf if not same_rr(op[1], rr)]
+
+ def _remove_rr_from_deletions(self, rr):
+ '''
+ Removes the given rr from the currently buffered deletions;
+ returns True if anything is removed, False if the RR was not present.
+ This method is protected; it is not meant to be called from anywhere
+ but the add_data() method. It is not private for easier testing.
+ '''
+ orig_size = len(self.__deletions)
+ self.__deletions = self.__remove_rr_from_buffer(self.__deletions, rr)
+ return len(self.__deletions) != orig_size
+
+ def _remove_rr_from_additions(self, rr):
+ '''
+ Removes the given rr from the currently buffered additions;
+ returns True if anything is removed, False if the RR was not present.
+ This method is protected; it is not meant to be called from anywhere
+ but the delete_data() method. It is not private for easier testing.
+ '''
+ orig_size = len(self.__additions)
+ self.__additions = self.__remove_rr_from_buffer(self.__additions, rr)
+ return len(self.__additions) != orig_size
+
+ def __get_name_from_additions(self, name):
+ '''
+ Returns a list of all rrs in the additions queue that have the given
+ Name.
+ This method is private; it is not meant to be called from anywhere
+ but the find_all_updated() method.
+ '''
+ return [ rr for (_, rr) in self.__additions if rr.get_name() == name ]
+
+ def __get_name_from_deletions(self, name):
+ '''
+ Returns a list of all rrs in the deletions queue that have the given
+ Name.
+ This method is private; it is not meant to be called from anywhere
+ but the find_all_updated() method.
+ '''
+ return [ rr for (_, rr) in self.__deletions if rr.get_name() == name ]
+
+ def __get_name_type_from_additions(self, name, rrtype):
+ '''
+ Returns a list of all rrs in the additions queue that have the given
+ name and type.
+ This method is private; it is not meant to be called from anywhere
+ but the find_updated() method.
+ '''
+ return [ rr for (_, rr) in self.__additions\
+ if rr.get_name() == name and rr.get_type() == rrtype ]
+
+ def __get_name_type_from_deletions(self, name, rrtype):
+ '''
+ Returns a list of the rdatas of the rrs in the deletions queue that
+ have the given name and type.
+ This method is private; it is not meant to be called from anywhere
+ but the find_updated() method.
+ '''
+ return [ rr.get_rdata()[0] for (_, rr) in self.__deletions\
+ if rr.get_name() == name and rr.get_type() == rrtype ]
+
+ def find_updated(self, name, rrtype):
+ '''
+ Returns the result of find(), but with current updates applied, i.e.
+ as if this diff has been committed. Only performs additional
+ processing in the case find() returns SUCCESS, NXDOMAIN, or NXRRSET;
+ in all other cases, the results are returned directly.
+ Any RRs in the current deletions buffer are removed from the result,
+ and RRs in the current additions buffer are added to the result.
+ If the result was SUCCESS, but every RR in it is removed due to
+ deletions, and there is nothing in the additions, the rcode is changed
+ to NXRRSET.
+ If the result was NXDOMAIN or NXRRSET, and there are rrs in the
+ additions buffer, the result is changed to SUCCESS.
+ '''
+ if not self.__single_update_mode:
+ raise ValueError("find_updated() can only be used in " +
+ "single-update mode")
+ result, rrset, flags = self.find(name, rrtype)
+
+ added_rrs = self.__get_name_type_from_additions(name, rrtype)
+ deleted_rrs = self.__get_name_type_from_deletions(name, rrtype)
+
+ if result == ZoneFinder.SUCCESS:
+ new_rrset = isc.dns.RRset(name, self.__updater.get_class(),
+ rrtype, rrset.get_ttl())
+ for rdata in rrset.get_rdata():
+ if rdata not in deleted_rrs:
+ new_rrset.add_rdata(rdata)
+ # If all data has been deleted and there is nothing to add,
+ # we cannot really know whether it is NXDOMAIN or NXRRSET;
+ # NXRRSET seems safest (we could find out, but it would require
+ # another search on the name, which is probably not worth the
+ # trouble).
+ if new_rrset.get_rdata_count() == 0 and len(added_rrs) == 0:
+ result = ZoneFinder.NXRRSET
+ new_rrset = None
+ elif (result == ZoneFinder.NXDOMAIN or result == ZoneFinder.NXRRSET)\
+ and len(added_rrs) > 0:
+ new_rrset = isc.dns.RRset(name, self.__updater.get_class(),
+ rrtype, added_rrs[0].get_ttl())
+ # There was no data in the zone, but there is data now
+ result = ZoneFinder.SUCCESS
+ else:
+ # Can't reliably handle other cases, just return the original
+ # data
+ return result, rrset, flags
+
+ for rr in added_rrs:
+ # Can only be 1-rr RRsets at this point
+ new_rrset.add_rdata(rr.get_rdata()[0])
+
+ return result, new_rrset, flags
+
+ def find_all_updated(self, name):
+ '''
+ Returns the result of find_all(), but with current updates applied,
+ i.e. as if this diff has been committed. Only performs additional
+ processing in the case find() returns SUCCESS or NXDOMAIN;
+ in all other cases, the results are returned directly.
+ Any RRs in the current deletions buffer are removed from the result,
+ and RRs in the current additions buffer are added to the result.
+ If the result was SUCCESS, but every RR in it is removed due to
+ deletions, and there is nothing in the additions, the rcode is changed
+ to NXDOMAIN.
+ If the result was NXDOMAIN, and there are rrs in the additions buffer,
+ the result is changed to SUCCESS.
+ '''
+ if not self.__single_update_mode:
+ raise ValueError("find_all_updated can only be used in " +
+ "single-update mode")
+ result, rrsets, flags = self.find_all(name)
+ new_rrsets = []
+ added_rrs = self.__get_name_from_additions(name)
+ if result == ZoneFinder.SUCCESS and\
+ (flags & ZoneFinder.RESULT_WILDCARD == 0):
+ deleted_rrs = self.__get_name_from_deletions(name)
+ for rr in rrsets:
+ if rr not in deleted_rrs:
+ new_rrsets.append(rr)
+ if len(new_rrsets) == 0 and len(added_rrs) == 0:
+ result = ZoneFinder.NXDOMAIN
+ elif result == ZoneFinder.NXDOMAIN and\
+ len(added_rrs) > 0:
+ result = ZoneFinder.SUCCESS
+ else:
+ # Can't reliably handle other cases, just return the original
+ # data
+ return result, rrsets, flags
+ for rr in added_rrs:
+ if rr.get_name() == name:
+ new_rrsets.append(rr)
+ return result, new_rrsets, flags
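
Distilled from the docstrings above, the new single-update mode of Diff is driven
roughly as sketched below. 'ds_client' and the RRset variables are placeholders;
the SOA-first requirement and the delete-before-add apply order are those described
in the __init__() docstring.

    # Sketch only: 'ds_client', 'old_soa', 'new_soa' and the other RRsets
    # are placeholders for objects the caller already has.
    from isc.dns import Name
    from isc.xfrin.diff import Diff

    diff = Diff(ds_client, Name('example.org.'), replace=False,
                journaling=True, single_update_mode=True)

    # The first delete and the first add must be the old and new SOA;
    # after that, additions and deletions may come in any order.
    diff.delete_data(old_soa)
    diff.add_data(new_soa)
    diff.delete_data(removed_rrset)
    diff.add_data(added_rrset)

    # find_updated()/find_all_updated() show the zone as if the buffered
    # changes were already applied. commit() compacts both buffers,
    # applies the deletions and then the additions as one IXFR-like
    # changeset, and makes the result visible.
    diff.commit()
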
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
index be943c8..b948e02 100644
--- a/src/lib/python/isc/xfrin/libxfrin_messages.mes
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -15,7 +15,17 @@
# No namespace declaration - these constants go in the global namespace
# of the libxfrin_messages python module.
-% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.
+% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4/%5. Adjusting %2 -> %1.
The xfrin module received an update containing multiple rdata changes for the
same RRset. But the TTLs of these don't match each other. As we combine them
-together, the later one get's overwritten to the earlier one in the sequence.
+together, the later one is overwritten with the earlier one in the sequence.
+
+% LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue, this message can be ignored;
+otherwise you may want to check why journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
index 416d62b..459efc3 100644
--- a/src/lib/python/isc/xfrin/tests/Makefile.am
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -20,5 +20,6 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 9fab890..906406f 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -15,6 +15,7 @@
import isc.log
import unittest
+from isc.datasrc import ZoneFinder
from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
from isc.xfrin.diff import Diff, NoSuchZone
@@ -47,6 +48,13 @@ class DiffTest(unittest.TestCase):
self.__broken_called = False
self.__warn_called = False
self.__should_replace = False
+ self.__find_called = False
+ self.__find_name = None
+ self.__find_type = None
+ self.__find_options = None
+ self.__find_all_called = False
+ self.__find_all_name = None
+ self.__find_all_options = None
# Some common values
self.__rrclass = RRClass.IN()
self.__type = RRType.A()
@@ -69,6 +77,31 @@ class DiffTest(unittest.TestCase):
self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
'192.0.2.2'))
+ # Also create a few other (valid) rrsets
+ # A SOA record
+ self.__rrset_soa = RRset(Name('example.org.'), self.__rrclass,
+ RRType.SOA(), RRTTL(3600))
+ self.__rrset_soa.add_rdata(Rdata(RRType.SOA(), self.__rrclass,
+ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200"))
+ # A few single-rr rrsets that together would form a multi-rr rrset
+ self.__rrset3 = RRset(Name('c.example.org.'), self.__rrclass,
+ RRType.TXT(), self.__ttl)
+ self.__rrset3.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "one"))
+ self.__rrset4 = RRset(Name('c.example.org.'), self.__rrclass,
+ RRType.TXT(), self.__ttl)
+ self.__rrset4.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "two"))
+ self.__rrset5 = RRset(Name('c.example.org.'), self.__rrclass,
+ RRType.TXT(), self.__ttl)
+ self.__rrset5.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "three"))
+ self.__rrset6 = RRset(Name('d.example.org.'), self.__rrclass,
+ RRType.A(), self.__ttl)
+ self.__rrset6.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.1"))
+ self.__rrset7 = RRset(Name('d.example.org.'), self.__rrclass,
+ RRType.A(), self.__ttl)
+ self.__rrset7.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+
def __mock_compact(self):
"""
This can be put into the diff to hook into its compact method and see
@@ -98,6 +131,8 @@ class DiffTest(unittest.TestCase):
in the tested module.
"""
self.__warn_called = True
+ # Also log the message so we can check the log format (manually)
+ self.orig_logger.warn(*args)
def commit(self):
"""
@@ -127,7 +162,7 @@ class DiffTest(unittest.TestCase):
"""
return self.__rrclass
- def get_updater(self, zone_name, replace):
+ def get_updater(self, zone_name, replace, journaling=False):
"""
This one pretends this is the data source client and serves
getting an updater.
@@ -138,11 +173,37 @@ class DiffTest(unittest.TestCase):
# The diff should not delete the old data.
self.assertEqual(self.__should_replace, replace)
self.__updater_requested = True
- # Pretend this zone doesn't exist
if zone_name == Name('none.example.org.'):
+ # Pretend this zone doesn't exist
return None
+
+ # If journaling is enabled, record the fact; for a special zone
+ # pretend that we don't support journaling.
+ if journaling:
+ if zone_name == Name('nodiff.example.org'):
+ raise isc.datasrc.NotImplemented('journaling not supported')
+ self.__journaling_enabled = True
else:
- return self
+ self.__journaling_enabled = False
+
+ return self
+
+ def find(self, name, rrtype, options=None):
+ self.__find_called = True
+ self.__find_name = name
+ self.__find_type = rrtype
+ self.__find_options = options
+ # Doesn't really matter what is returned, as long
+ # as the test can check that it's passed along
+ return "find_return"
+
+ def find_all(self, name, options=None):
+ self.__find_all_called = True
+ self.__find_all_name = name
+ self.__find_all_options = options
+ # Doesn't really matter what is returned, as long
+ # as the test can check that it's passed along
+ return "find_all_return"
def test_create(self):
"""
@@ -152,6 +213,8 @@ class DiffTest(unittest.TestCase):
diff = Diff(self, Name('example.org.'))
self.assertTrue(self.__updater_requested)
self.assertEqual([], diff.get_buffer())
+ # By default journaling is disabled
+ self.assertFalse(self.__journaling_enabled)
def test_create_nonexist(self):
"""
@@ -161,6 +224,14 @@ class DiffTest(unittest.TestCase):
self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
self.assertTrue(self.__updater_requested)
+ def test_create_withjournal(self):
+ Diff(self, Name('example.org'), False, True)
+ self.assertTrue(self.__journaling_enabled)
+
+ def test_create_nojournal(self):
+ Diff(self, Name('nodiff.example.org'), False, True)
+ self.assertFalse(self.__journaling_enabled)
+
def __data_common(self, diff, method, operation):
"""
Common part of test for test_add and test_delte.
@@ -243,6 +314,9 @@ class DiffTest(unittest.TestCase):
self.assertRaises(ValueError, diff.commit)
self.assertRaises(ValueError, diff.add_data, self.__rrset2)
self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
+ self.assertRaises(ValueError, diff.find, Name('foo.example.org.'),
+ RRType.A())
+ self.assertRaises(ValueError, diff.find_all, Name('foo.example.org.'))
diff.apply = orig_apply
self.assertRaises(ValueError, diff.apply)
# This one does not state it should raise, so check it doesn't
@@ -410,7 +484,7 @@ class DiffTest(unittest.TestCase):
Test the TTL handling. A warn function should have been called if they
differ, but that's all, it should not crash or raise.
"""
- orig_logger = isc.xfrin.diff.logger
+ self.orig_logger = isc.xfrin.diff.logger
try:
isc.xfrin.diff.logger = self
diff = Diff(self, Name('example.org.'))
@@ -431,16 +505,589 @@ class DiffTest(unittest.TestCase):
self.assertEqual(self.__ttl, diff.get_buffer()[0][1].get_ttl())
self.assertTrue(self.__warn_called)
finally:
- isc.xfrin.diff.logger = orig_logger
+ isc.xfrin.diff.logger = self.orig_logger
- def test_relpace(self):
- """
+ def test_rrsig_ttl(self):
+ '''Similar to the previous test, but for RRSIGs of different covered
+ types.
+
+ They shouldn't be compacted.
+
+ '''
+ diff = Diff(self, Name('example.org.'))
+ rrsig1 = RRset(Name('example.org'), self.__rrclass,
+ RRType.RRSIG(), RRTTL(3600))
+ rrsig1.add_rdata(Rdata(RRType.RRSIG(), self.__rrclass,
+ 'A 5 3 3600 20000101000000 20000201000000 ' +
+ '0 example.org. FAKEFAKEFAKE'))
+ diff.add_data(rrsig1)
+ rrsig2 = RRset(Name('example.org'), self.__rrclass,
+ RRType.RRSIG(), RRTTL(1800))
+ rrsig2.add_rdata(Rdata(RRType.RRSIG(), self.__rrclass,
+ 'AAAA 5 3 3600 20000101000000 20000201000000 ' +
+ '1 example.org. FAKEFAKEFAKE'))
+ diff.add_data(rrsig2)
+ diff.compact()
+ self.assertEqual(2, len(diff.get_buffer()))
+
+ def test_replace(self):
+ '''
Test that when we want to replace the whole zone, it is propagated.
- """
+ '''
self.__should_replace = True
diff = Diff(self, "example.org.", True)
self.assertTrue(self.__updater_requested)
+ def test_get_buffer(self):
+ '''
+ Test that the getters raise when used in the wrong mode
+ '''
+ diff_multi = Diff(self, Name('example.org.'), single_update_mode=False)
+ self.assertRaises(ValueError, diff_multi.get_single_update_buffers)
+ self.assertEqual([], diff_multi.get_buffer())
+
+ diff_single = Diff(self, Name('example.org.'), single_update_mode=True)
+ self.assertRaises(ValueError, diff_single.get_buffer)
+ self.assertEqual(([], []), diff_single.get_single_update_buffers())
+
+ def test_finds_single(self):
+ '''
+ Test that find_updated() and find_all_updated() can only be used
+ in single-update-mode.
+ '''
+ diff_multi = Diff(self, Name('example.org.'), single_update_mode=False)
+ self.assertRaises(ValueError, diff_multi.find_updated,
+ Name('example.org.'), RRType.A())
+ self.assertRaises(ValueError, diff_multi.find_all_updated,
+ Name('example.org.'))
+
+ def test_single_update_mode(self):
+ '''
+ Test single-update mode. In this mode, updates and deletes can
+ be done in any order, but there may only be one changeset.
+ For both updates and deletes, exactly one SOA rr must be given,
+ and it must be the first change.
+ '''
+
+ # full rrset for A (to check compact())
+ txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT(),
+ RRTTL(3600))
+ txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "one"))
+ txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "two"))
+ txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "three"))
+ a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A(),
+ RRTTL(3600))
+ a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.1"))
+ a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.2"))
+
+ diff = Diff(self, Name('example.org.'), single_update_mode=True)
+
+ # adding a first should fail
+ self.assertRaises(ValueError, diff.add_data, a)
+ # But soa should work
+ diff.add_data(self.__rrset_soa)
+ # And then A should as well
+ diff.add_data(self.__rrset3)
+ diff.add_data(self.__rrset4)
+ diff.add_data(self.__rrset5)
+ # But another SOA should fail again
+ self.assertRaises(ValueError, diff.add_data, self.__rrset_soa)
+
+ # Same for delete
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset6)
+ diff.delete_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset6)
+ diff.delete_data(self.__rrset7)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset_soa)
+
+ # Not compacted yet, so the buffers should be as we
+ # filled them
+ (delbuf, addbuf) = diff.get_single_update_buffers()
+ self.assertEqual([('delete', self.__rrset_soa),
+ ('delete', self.__rrset6),
+ ('delete', self.__rrset7)], delbuf)
+ self.assertEqual([('add', self.__rrset_soa),
+ ('add', self.__rrset3),
+ ('add', self.__rrset4),
+ ('add', self.__rrset5)], addbuf)
+
+ # Compact should compact the A records in both buffers
+ diff.compact()
+ (delbuf, addbuf) = diff.get_single_update_buffers()
+ # need rrset equality again :/
+ self.assertEqual(2, len(delbuf))
+ self.assertEqual(2, len(delbuf[0]))
+ self.assertEqual('delete', delbuf[0][0])
+ self.assertEqual(self.__rrset_soa.to_text(), delbuf[0][1].to_text())
+ self.assertEqual(2, len(delbuf[1]))
+ self.assertEqual('delete', delbuf[1][0])
+ self.assertEqual(a.to_text(), delbuf[1][1].to_text())
+
+ self.assertEqual(2, len(addbuf))
+ self.assertEqual(2, len(addbuf[0]))
+ self.assertEqual('add', addbuf[0][0])
+ self.assertEqual(self.__rrset_soa.to_text(), addbuf[0][1].to_text())
+ self.assertEqual(2, len(addbuf[1]))
+ self.assertEqual('add', addbuf[1][0])
+ self.assertEqual(txt.to_text(), addbuf[1][1].to_text())
+
+ # Apply should reset the buffers
+ diff.apply()
+ (delbuf, addbuf) = diff.get_single_update_buffers()
+ self.assertEqual([], delbuf)
+ self.assertEqual([], addbuf)
+
+ # Now the change has been applied, and the buffers are cleared,
+ # Adding non-SOA records should fail again.
+ self.assertRaises(ValueError, diff.add_data, a)
+ self.assertRaises(ValueError, diff.delete_data, a)
+
+ def test_add_delete_same(self):
+ '''
+ Test that if a record is added, then deleted, it is not added to
+ both buffers, but removed from the additions, and vice versa.
+ '''
+ diff = Diff(self, Name('example.org.'), single_update_mode=True)
+ # Need SOA records first
+ diff.delete_data(self.__rrset_soa)
+ diff.add_data(self.__rrset_soa)
+
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ diff.add_data(self.__rrset1)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(2, len(additions))
+
+ diff.delete_data(self.__rrset1)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ diff.delete_data(self.__rrset2)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(2, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ diff.add_data(self.__rrset2)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ def test_find(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('www.example.org.')
+ rrtype = RRType.A()
+
+ self.assertFalse(self.__find_called)
+ self.assertEqual(None, self.__find_name)
+ self.assertEqual(None, self.__find_type)
+ self.assertEqual(None, self.__find_options)
+
+ self.assertEqual("find_return", diff.find(name, rrtype))
+
+ self.assertTrue(self.__find_called)
+ self.assertEqual(name, self.__find_name)
+ self.assertEqual(rrtype, self.__find_type)
+ self.assertEqual(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK,
+ self.__find_options)
+
+ def test_find_options(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('foo.example.org.')
+ rrtype = RRType.TXT()
+ options = ZoneFinder.NO_WILDCARD
+
+ self.assertEqual("find_return", diff.find(name, rrtype, options))
+
+ self.assertTrue(self.__find_called)
+ self.assertEqual(name, self.__find_name)
+ self.assertEqual(rrtype, self.__find_type)
+ self.assertEqual(options, self.__find_options)
+
+ def test_find_all(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('www.example.org.')
+
+ self.assertFalse(self.__find_all_called)
+ self.assertEqual(None, self.__find_all_name)
+ self.assertEqual(None, self.__find_all_options)
+
+ self.assertEqual("find_all_return", diff.find_all(name))
+
+ self.assertTrue(self.__find_all_called)
+ self.assertEqual(name, self.__find_all_name)
+ self.assertEqual(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK,
+ self.__find_all_options)
+
+ def test_find_all_options(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('www.example.org.')
+ options = isc.datasrc.ZoneFinder.NO_WILDCARD
+
+ self.assertFalse(self.__find_all_called)
+ self.assertEqual(None, self.__find_all_name)
+ self.assertEqual(None, self.__find_all_options)
+
+ self.assertEqual("find_all_return", diff.find_all(name, options))
+
+ self.assertTrue(self.__find_all_called)
+ self.assertEqual(name, self.__find_all_name)
+ self.assertEqual(options, self.__find_all_options)
+
+ def __common_remove_rr_from_buffer(self, diff, add_method, remove_method,
+ op_str, buf_nr):
+ add_method(self.__rrset_soa)
+ add_method(self.__rrset2)
+ add_method(self.__rrset3)
+ add_method(self.__rrset4)
+
+ # sanity check
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset2,
+ self.__rrset3,
+ self.__rrset4 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # remove one
+ self.assertTrue(remove_method(self.__rrset2))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3,
+ self.__rrset4 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # SOA should not be removed
+ self.assertFalse(remove_method(self.__rrset_soa))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3,
+ self.__rrset4 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # remove another
+ self.assertTrue(remove_method(self.__rrset4))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # remove nonexistent should return False
+ self.assertFalse(remove_method(self.__rrset4))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ def test_remove_rr_from_additions(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ self.__common_remove_rr_from_buffer(diff, diff.add_data,
+ diff._remove_rr_from_additions,
+ 'add', 1)
+
+ def test_remove_rr_from_deletions(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ self.__common_remove_rr_from_buffer(diff, diff.delete_data,
+ diff._remove_rr_from_deletions,
+ 'delete', 0)
+
+ def __create_find(self, result, rrset, flags):
+ '''
+ Overwrites the local find() method with a method that returns
+ the tuple (result, rrset, flags)
+ '''
+ def new_find(name, rrtype, fflags):
+ return (result, rrset, flags)
+ self.find = new_find
+
+ def __create_find_all(self, result, rrsets, flags):
+ '''
+ Overwrites the local find_all() method with a method that returns
+ the tuple (result, rrsets, flags)
+ '''
+ def new_find_all(name, fflags):
+ return (result, rrsets, flags)
+ self.find_all = new_find_all
+
+ def __check_find_call(self, method, query_rrset, expected_rcode,
+ expected_rdatas=None):
+ '''
+ Helper for find tests; calls the given method with the name and
+ type of the given rrset. Checks for the given rcode.
+ If expected_rdatas is not None, the result name and type are
+ checked to match those of the given rrset, and the rdatas are checked
+ to be equal.
+ The given method must have the same arguments and return type
+ as find()
+ '''
+ result, rrset, _ = method(query_rrset.get_name(),
+ query_rrset.get_type())
+ self.assertEqual(expected_rcode, result)
+ if expected_rdatas is not None:
+ self.assertEqual(query_rrset.get_name(), rrset.get_name())
+ self.assertEqual(query_rrset.get_type(), rrset.get_type())
+ if expected_rdatas is not None:
+ self.assertEqual(expected_rdatas, rrset.get_rdata())
+ else:
+ self.assertEqual(None, rrset)
+
+ def __check_find_all_call(self, method, query_rrset, expected_rcode,
+ expected_rrs=[]):
+ '''
+ Helper for find_all tests; calls the given method with the name of
+ the given rrset. Checks for the given rcode, and checks that the
+ returned list of rrsets matches expected_rrs.
+ The given method must have the same arguments and return type
+ as find_all()
+ '''
+ result, rrsets, _ = method(query_rrset.get_name())
+ self.assertEqual(expected_rcode, result)
+ # We have no real equality function for rrsets, but since
+ # the rrsets in question are the very objects that were returned, pointer equality
+ # works as well
+ self.assertEqual(expected_rrs, rrsets)
+
+ def test_find_updated_existing_data(self):
+ '''
+ Tests whether existent data is updated with the additions and
+ deletions from the Diff
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ # override the actual find method
+ self.__create_find(ZoneFinder.SUCCESS, self.__rrset3, 0)
+
+ # sanity check
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # check that normal find also returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Adding another should have it returned in the find_updated
+ diff.add_data(self.__rrset4)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata() +
+ self.__rrset4.get_rdata())
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Adding a different type should have no effect
+ diff.add_data(self.__rrset2)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata() +
+ self.__rrset4.get_rdata())
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Deleting 3 now should result in only 4 being updated
+ diff.delete_data(self.__rrset3)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset4.get_rdata())
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Deleting 4 now should result in empty rrset
+ diff.delete_data(self.__rrset4)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.NXRRSET)
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ def test_find_updated_nonexistent_data(self):
+ '''
+ Test whether added data for a query that would originally result
+ in NXDOMAIN works
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ # override the actual find method
+ self.__create_find(ZoneFinder.NXDOMAIN, None, 0)
+
+ # Sanity check
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ # Add data and see it is returned
+ diff.add_data(self.__rrset3)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ # Add unrelated data, result should be the same
+ diff.add_data(self.__rrset2)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ # Remove, result should now be NXDOMAIN again
+ diff.delete_data(self.__rrset3)
+ result, rrset, _ = diff.find_updated(self.__rrset3.get_name(),
+ self.__rrset3.get_type())
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ def test_find_updated_other(self):
+ '''
+ Test that any other ZoneFinder.result code is directly
+ passed on.
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+
+ # Add and delete some data to make sure it's not used
+ diff.add_data(self.__rrset_soa)
+ diff.add_data(self.__rrset3)
+ diff.delete_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset2)
+
+ for rcode in [ ZoneFinder.DELEGATION,
+ ZoneFinder.CNAME,
+ ZoneFinder.DNAME ]:
+ # override the actual find method
+ self.__create_find(rcode, None, 0)
+ self.__check_find_call(diff.find, self.__rrset3, rcode)
+ self.__check_find_call(diff.find_updated, self.__rrset3, rcode)
+
+ def test_find_all_existing_data(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ # override the actual find method
+ self.__create_find_all(ZoneFinder.SUCCESS, [ self.__rrset3 ], 0)
+
+ # Sanity check
+ result, rrsets, _ = diff.find_all_updated(self.__rrset3.get_name())
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual([self.__rrset3], rrsets)
+
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ # Add a second rr with different type at same name
+ add_rrset = RRset(self.__rrset3.get_name(), self.__rrclass,
+ RRType.A(), self.__ttl)
+ add_rrset.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+ diff.add_data(add_rrset)
+
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.SUCCESS,
+ [self.__rrset3, add_rrset])
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ # Remove original one
+ diff.delete_data(self.__rrset3)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, [add_rrset])
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ # And remove new one, result should then become NXDOMAIN
+ diff.delete_data(add_rrset)
+ result, rrsets, _ = diff.find_all_updated(self.__rrset3.get_name())
+
+ self.assertEqual(ZoneFinder.NXDOMAIN, result)
+ self.assertEqual([ ], rrsets)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ def test_find_all_nonexistent_data(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ self.__create_find_all(ZoneFinder.NXDOMAIN, [], 0)
+
+ # Sanity check
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ # Adding data should change the result
+ diff.add_data(self.__rrset2)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.SUCCESS, [ self.__rrset2 ])
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ # Adding data at other name should not
+ diff.add_data(self.__rrset3)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.SUCCESS, [ self.__rrset2 ])
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ # Deleting it should revert to original
+ diff.delete_data(self.__rrset2)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ def test_find_all_other_results(self):
+ '''
+ Any result code other than SUCCESS and NXDOMAIN should cause
+ the results to be passed on directly
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+
+ # Add and delete some data to make sure it's not used
+ diff.add_data(self.__rrset_soa)
+ diff.add_data(self.__rrset3)
+ diff.delete_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset2)
+
+ for rcode in [ ZoneFinder.NXRRSET,
+ ZoneFinder.DELEGATION,
+ ZoneFinder.CNAME,
+ ZoneFinder.DNAME ]:
+ # override the actual find method
+ self.__create_find_all(rcode, [], 0)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ rcode)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ rcode)
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ rcode)
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ rcode)
+
if __name__ == "__main__":
isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/lib/server_common/.gitignore b/src/lib/server_common/.gitignore
new file mode 100644
index 0000000..e25a98f
--- /dev/null
+++ b/src/lib/server_common/.gitignore
@@ -0,0 +1,2 @@
+/server_common_messages.cc
+/server_common_messages.h
diff --git a/src/lib/server_common/Makefile.am b/src/lib/server_common/Makefile.am
index c2779b4..6010316 100644
--- a/src/lib/server_common/Makefile.am
+++ b/src/lib/server_common/Makefile.am
@@ -21,6 +21,7 @@ libserver_common_la_SOURCES = client.h client.cc
libserver_common_la_SOURCES += keyring.h keyring.cc
libserver_common_la_SOURCES += portconfig.h portconfig.cc
libserver_common_la_SOURCES += logger.h logger.cc
+libserver_common_la_SOURCES += socket_request.h socket_request.cc
nodist_libserver_common_la_SOURCES = server_common_messages.h
nodist_libserver_common_la_SOURCES += server_common_messages.cc
libserver_common_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
@@ -30,6 +31,7 @@ libserver_common_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/acl/libacl.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
+libserver_common_la_LIBADD += $(top_builddir)/src/lib/util/io/libutil_io.la
BUILT_SOURCES = server_common_messages.h server_common_messages.cc
server_common_messages.h server_common_messages.cc: server_common_messages.mes
$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/server_common/server_common_messages.mes
diff --git a/src/lib/server_common/PARTIAL_PORT_ON_WINDOWS b/src/lib/server_common/PARTIAL_PORT_ON_WINDOWS
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/server_common/client.cc b/src/lib/server_common/client.cc
index c74e8c3..e6383d6 100644
--- a/src/lib/server_common/client.cc
+++ b/src/lib/server_common/client.cc
@@ -34,9 +34,6 @@ struct Client::ClientImpl {
const IOMessage& request_;
const IPAddress request_src_;
-private:
- // silence MSVC warning C4512: assignment operator could not be generated
- ClientImpl& operator=(ClientImpl const&);
};
Client::Client(const IOMessage& request_message) :
diff --git a/src/lib/server_common/client.h b/src/lib/server_common/client.h
index 1c5928a..8cafb1e 100644
--- a/src/lib/server_common/client.h
+++ b/src/lib/server_common/client.h
@@ -140,7 +140,7 @@ private:
///
/// \param os A \c std::ostream object on which the insertion operation is
/// performed.
-/// \param edns A reference to an \c Client object output by the operation.
+/// \param client A reference to a \c Client object output by the operation.
/// \return A reference to the same \c std::ostream object referenced by
/// parameter \c os after the insertion operation.
std::ostream& operator<<(std::ostream& os, const Client& client);
diff --git a/src/lib/server_common/logger.h b/src/lib/server_common/logger.h
index cfca1f3..80bc81d 100644
--- a/src/lib/server_common/logger.h
+++ b/src/lib/server_common/logger.h
@@ -18,7 +18,7 @@
#include <log/macros.h>
#include <server_common/server_common_messages.h>
-/// \file logger.h
+/// \file server_common/logger.h
/// \brief Server Common library global logger
///
/// This holds the logger for the server common library. It is a private header
@@ -31,12 +31,11 @@ namespace server_common {
/// \brief The logger for this library
extern isc::log::Logger logger;
-enum {
- /// \brief Trace basic operations
- DBG_TRACE_BASIC = 10,
- /// \brief Print also values used
- DBG_TRACE_VALUES = 40
-};
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// \brief Print also values used
+const int DBG_TRACE_VALUES = DBGLVL_TRACE_BASIC_DATA;
}
}
diff --git a/src/lib/server_common/portconfig.cc b/src/lib/server_common/portconfig.cc
index a85680b..1327f1e 100644
--- a/src/lib/server_common/portconfig.cc
+++ b/src/lib/server_common/portconfig.cc
@@ -16,6 +16,7 @@
#include <server_common/portconfig.h>
#include <server_common/logger.h>
+#include <server_common/socket_request.h>
#include <asiolink/io_address.h>
#include <asiodns/dns_service.h>
@@ -60,8 +61,7 @@ parseAddresses(isc::data::ConstElementPtr addresses,
}
result.push_back(AddressPair(addr->stringValue(),
port->intValue()));
- }
- catch (const TypeError&) { // Better error message
+ } catch (const TypeError&) { // Better error message
LOG_ERROR(logger, SRVCOMM_ADDRESS_TYPE).
arg(addrPair->str());
isc_throw(TypeError,
@@ -78,31 +78,53 @@ parseAddresses(isc::data::ConstElementPtr addresses,
namespace {
+vector<string> current_sockets;
+
void
-setAddresses(DNSService& service, const AddressList& addresses) {
+setAddresses(DNSServiceBase& service, const AddressList& addresses,
+ DNSService::ServerFlag server_options)
+{
service.clearServers();
+ BOOST_FOREACH(const string& token, current_sockets) {
+ socketRequestor().releaseSocket(token);
+ }
+ current_sockets.clear();
BOOST_FOREACH(const AddressPair &address, addresses) {
- service.addServer(address.second, address.first);
+ const int af(IOAddress(address.first).getFamily());
+ // We use the application name supplied to the socket requestor on
+ // creation, so we can freely use SHARE_SAME.
+ const SocketRequestor::SocketID
+ tcp(socketRequestor().requestSocket(SocketRequestor::TCP,
+ address.first, address.second,
+ SocketRequestor::SHARE_SAME));
+ current_sockets.push_back(tcp.second);
+ service.addServerTCPFromFD(tcp.first, af);
+ const SocketRequestor::SocketID
+ udp(socketRequestor().requestSocket(SocketRequestor::UDP,
+ address.first, address.second,
+ SocketRequestor::SHARE_SAME));
+ current_sockets.push_back(udp.second);
+ service.addServerUDPFromFD(udp.first, af, server_options);
}
}
}
void
-installListenAddresses(const AddressList& newAddresses,
- AddressList& addressStore,
- isc::asiodns::DNSService& service)
+installListenAddresses(const AddressList& new_addresses,
+ AddressList& address_store,
+ DNSServiceBase& service,
+ DNSService::ServerFlag server_options)
{
try {
LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_SET_LISTEN);
- BOOST_FOREACH(const AddressPair& addr, newAddresses) {
+ BOOST_FOREACH(const AddressPair& addr, new_addresses) {
LOG_DEBUG(logger, DBG_TRACE_VALUES, SRVCOMM_ADDRESS_VALUE).
arg(addr.first).arg(addr.second);
}
- setAddresses(service, newAddresses);
- addressStore = newAddresses;
- }
- catch (const exception& e) {
+ setAddresses(service, new_addresses, server_options);
+ address_store = new_addresses;
+ } catch (const SocketRequestor::NonFatalSocketError& e) {
/*
* If one of the addresses isn't set successfully, we will restore
* the old addresses, the behavior is that either all address are
@@ -118,14 +140,28 @@ installListenAddresses(const AddressList& newAddresses,
*/
LOG_ERROR(logger, SRVCOMM_ADDRESS_FAIL).arg(e.what());
try {
- setAddresses(service, addressStore);
- }
- catch (const exception& e2) {
+ setAddresses(service, address_store, server_options);
+ } catch (const SocketRequestor::NonFatalSocketError& e2) {
LOG_FATAL(logger, SRVCOMM_ADDRESS_UNRECOVERABLE).arg(e2.what());
+ // If we can't set the new ones, nor the old ones, at least
+ // releasing everything should work. If it doesn't, there isn't
+ // anything else we could do.
+ setAddresses(service, AddressList(), server_options);
+ address_store.clear();
}
//Anyway the new configure has problem, we need to notify configure
//manager the new configure doesn't work
throw;
+ } catch (const exception& e) {
+ // Any other kind of exception is fatal. It might mean we are in
+ // an inconsistent state with the boss/socket creator, so we abort
+ // to make sure it doesn't last.
+ LOG_FATAL(logger, SRVCOMM_EXCEPTION_ALLOC).arg(e.what());
+ abort();
+ } catch (...) {
+ // Same as the previous case, but we have even less information
+ LOG_FATAL(logger, SRVCOMM_UNKNOWN_EXCEPTION_ALLOC);
+ abort();
}
}
diff --git a/src/lib/server_common/portconfig.h b/src/lib/server_common/portconfig.h
index e4e7bf6..0795728 100644
--- a/src/lib/server_common/portconfig.h
+++ b/src/lib/server_common/portconfig.h
@@ -15,22 +15,15 @@
#ifndef ISC_SERVER_COMMON_PORTCONFIG_H
#define ISC_SERVER_COMMON_PORTCONFIG_H
+#include <cc/data.h>
+
+#include <asiodns/dns_service.h>
+
#include <utility>
#include <string>
#include <stdint.h>
#include <vector>
-#include <cc/data.h>
-
-/*
- * Some forward declarations.
- */
-namespace isc {
-namespace asiodns {
-class DNSService;
-}
-}
-
namespace isc {
namespace server_common {
/**
@@ -88,36 +81,49 @@ AddressList
parseAddresses(isc::data::ConstElementPtr addresses,
const std::string& elemName);
-/**
- * \brief Changes current listening addresses and ports.
- *
- * Removes all sockets we currently listen on and starts listening on the
- * addresses and ports requested in newAddresses.
- *
- * If it fails to set up the new addresses, it attempts to roll back to the
- * previous addresses (but it still propagates the exception). If the rollback
- * fails as well, it aborts the application (it assumes if it can't listen
- * on the new addresses nor on the old ones, the application is useless anyway
- * and should be restarted by Boss, not to mention that the internal state is
- * probably broken).
- *
- * \param newAddresses are the addresses you want to listen on.
- * \param addressStore is the place you store your current addresses. It is
- * used when there's a need for rollback. The newAddresses are copied here
- * when the change is successful.
- * \param dnsService is the DNSService object we use now. The requests from
- * the new sockets are handled using this dnsService (and all current
- * sockets on the service are closed first).
- * \throw asiolink::IOError when initialization or closing of socket fails.
- * \throw std::bad_alloc when allocation fails.
- */
+/// \brief Changes current listening addresses and ports.
+///
+/// Removes all sockets we currently listen on and starts listening on the
+/// addresses and ports requested in new_addresses.
+///
+/// If it fails to set up the new addresses, it attempts to roll back to the
+/// previous addresses (but it still propagates the exception). If the rollback
+/// fails as well, it doesn't abort the application (to allow reconfiguration),
+/// but removes all the sockets it listened on. One of the exceptions is
+/// propagated.
+///
+/// The ports are requested from the socket creator through boss. Therefore
+/// you need to initialize the SocketRequestor before using this function.
+///
+/// \param new_addresses are the addresses you want to listen on.
+/// \param address_store is the place you store your current addresses. It is
+/// used when there's a need for rollback. The new_addresses are copied
+/// here when the change is successful.
+/// \param dns_service is the DNSService object we use now. The requests from
+/// the new sockets are handled using this dns_service (and all current
+/// sockets on the service are closed first).
+/// \param server_options specifies optional properties for the servers
+/// created via \c dns_service.
+///
+/// \throw asiolink::IOError when initialization or closing of socket fails.
+/// \throw isc::server_common::SocketRequestor::SocketError when the
+/// boss/socket creator doesn't want to give us the socket.
+/// \throw std::bad_alloc when allocation fails.
+/// \throw isc::InvalidOperation when the function is called and the
+/// SocketRequestor isn't initialized yet.
void
-installListenAddresses(const AddressList& newAddresses,
- AddressList& addressStore,
- asiodns::DNSService& dnsService);
+installListenAddresses(const AddressList& new_addresses,
+ AddressList& address_store,
+ asiodns::DNSServiceBase& dns_service,
+ asiodns::DNSService::ServerFlag server_options =
+ asiodns::DNSService::SERVER_DEFAULT);
}
}
}
#endif
+
+// Local Variables:
+// mode: c++
+// End:
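
For orientation, below is a minimal caller-side sketch of the reworked
installListenAddresses() interface documented above. It assumes the
SocketRequestor has already been initialized via initSocketRequestor();
the addresses, the port and the error-handling comments are illustrative
only and are not taken from the commit:

    #include <server_common/portconfig.h>
    #include <server_common/socket_request.h>

    using namespace isc::server_common;
    using namespace isc::server_common::portconfig;

    void
    reconfigureListeners(isc::asiodns::DNSServiceBase& dns_service,
                         AddressList& address_store)
    {
        AddressList wanted;
        wanted.push_back(AddressPair("127.0.0.1", 5300));
        wanted.push_back(AddressPair("::1", 5300));
        try {
            // Requests one TCP and one UDP socket per address from the
            // socket creator (via boss) and hands the FDs to dns_service;
            // on success, address_store is updated to the new list.
            installListenAddresses(wanted, address_store, dns_service);
        } catch (const SocketRequestor::NonFatalSocketError&) {
            // Rollback to the previous addresses has already been attempted
            // inside installListenAddresses; the caller only needs to report
            // the failed reconfiguration.
        }
    }
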
diff --git a/src/lib/server_common/server_common_messages.mes b/src/lib/server_common/server_common_messages.mes
index 5fbbb0b..0e4efa5 100644
--- a/src/lib/server_common/server_common_messages.mes
+++ b/src/lib/server_common/server_common_messages.mes
@@ -16,6 +16,31 @@ $NAMESPACE isc::server_common
# \brief Messages for the server_common library
+% SOCKETREQUESTOR_CREATED Socket requestor created for application %1
+Debug message. A socket requestor (client of the socket creator) is created
+for the corresponding application. Normally this should happen at most
+once throughout the lifetime of the application.
+
+% SOCKETREQUESTOR_DESTROYED Socket requestor destroyed
+Debug message. The socket requestor created at SOCKETREQUESTOR_CREATED
+has been destroyed. This event is generally unexpected other than in
+test cases.
+
+% SOCKETREQUESTOR_GETSOCKET Received a %1 socket for [%2]:%3, FD=%4, token=%5, path=%6
+Debug message. The socket requestor for the corresponding application
+has requested a socket for a set of address, port and protocol (shown
+in the log message) and successfully got it from the creator. The
+corresponding file descriptor and the associated "token" (an internal
+ID used between the creator and requestor) are shown in the log
+message.
+
+% SOCKETREQUESTOR_RELEASESOCKET Released a socket of token %1
+Debug message. The socket requestor has released a socket passed by
+the creator. The associated token of the socket is shown in the
+log message. If the corresponding SOCKETREQUESTOR_GETSOCKET was logged,
+more detailed information about the socket can be identified by matching
+the token.
+
% SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1
This points to an error in configuration. What was supposed to be a list of
IP address - port pairs isn't a list at all but something else.
@@ -38,7 +63,7 @@ message. A valid specification contains an address part (which must be a string
and must represent a valid IPv4 or IPv6 address) and port (which must be an
integer in the range valid for TCP/UDP ports on your system).
-% SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)
+% SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%1)
The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
the reason listed.
@@ -53,6 +78,17 @@ addresses we are going to listen on (eg. there will be one log message
per pair). This appears only after SRVCOMM_SET_LISTEN, but might
be hidden, as it has higher debug level.
+% SRVCOMM_EXCEPTION_ALLOC exception when allocating a socket: %1
+The process tried to allocate a socket using the socket creator, but an error
+occurred. But it is not one of the errors we are sure are "safe". In this case
+it is unclear if the unsuccessful communication left the process and the bind10
+process in inconsistent state, so the process is going to abort to prevent
+further problems in that area.
+
+This is probably a bug in the code, but it could be caused by other unusual
+conditions (like insufficient memory or a deleted socket file used for
+communication).
+
% SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring
Debug message indicating that the server is deinitializing the TSIG keyring.
@@ -71,3 +107,8 @@ specification is outside the valid range of 0 to 65535.
% SRVCOMM_SET_LISTEN setting addresses to listen to
Debug message, noting that the server is about to start listening on a
different set of IP addresses and ports than before.
+
+% SRVCOMM_UNKNOWN_EXCEPTION_ALLOC unknown exception when allocating a socket
+The situation is the same as in the SRVCOMM_EXCEPTION_ALLOC case, but further
+details about the error are unknown, because it was signaled by throwing
+something that is not an exception. This is definitely a bug.
diff --git a/src/lib/server_common/socket_request.cc b/src/lib/server_common/socket_request.cc
new file mode 100644
index 0000000..e471ad0
--- /dev/null
+++ b/src/lib/server_common/socket_request.cc
@@ -0,0 +1,428 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#include <config.h>
+
+#include "socket_request.h"
+#include <server_common/logger.h>
+
+#include <config/ccsession.h>
+#include <cc/session.h>
+#include <cc/data.h>
+#include <util/io/fd.h>
+#include <util/io/fd_share.h>
+
+#include <sys/un.h>
+#include <sys/socket.h>
+#include <cerrno>
+#include <csignal>
+#include <cstddef>
+
+namespace isc {
+namespace server_common {
+
+namespace {
+SocketRequestor* requestor(NULL);
+
+// Before the boss process calls send_fd, it first sends this
+// string to indicate success, followed by the file descriptor
+const std::string& CREATOR_SOCKET_OK() {
+ static const std::string str("1\n");
+ return (str);
+}
+
+// Before the boss process calls send_fd, it sends this
+// string to indicate failure. It will not send a file descriptor.
+const std::string& CREATOR_SOCKET_UNAVAILABLE() {
+ static const std::string str("0\n");
+ return (str);
+}
+
+// The name of the ccsession command to request a socket from boss
+// (the actual format of command and response are hardcoded in their
+// respective methods)
+const std::string& REQUEST_SOCKET_COMMAND() {
+ static const std::string str("get_socket");
+ return (str);
+}
+
+// The name of the ccsession command to tell boss we no longer need
+// a socket (the actual format of command and response are hardcoded
+// in their respective methods)
+const std::string& RELEASE_SOCKET_COMMAND() {
+ static const std::string str("drop_socket");
+ return (str);
+}
+
+// RCode constants for the get_socket command
+const size_t SOCKET_ERROR_CODE = 2;
+const size_t SHARE_ERROR_CODE = 3;
+
+// A helper converter from numeric protocol ID to the corresponding string,
+// used both for generating a message for the boss process and for logging.
+inline const char*
+protocolString(SocketRequestor::Protocol protocol) {
+ switch (protocol) {
+ case SocketRequestor::TCP:
+ return ("TCP");
+ case SocketRequestor::UDP:
+ return ("UDP");
+ default:
+ return ("unknown protocol");
+ }
+}
+
+// Creates the cc session message to request a socket.
+// The actual command format is hardcoded, and should match
+// the format as read in bind10_src.py.in
+isc::data::ConstElementPtr
+createRequestSocketMessage(SocketRequestor::Protocol protocol,
+ const std::string& address, uint16_t port,
+ SocketRequestor::ShareMode share_mode,
+ const std::string& share_name)
+{
+ const isc::data::ElementPtr request = isc::data::Element::createMap();
+ request->set("address", isc::data::Element::create(address));
+ request->set("port", isc::data::Element::create(port));
+ if (protocol != SocketRequestor::TCP && protocol != SocketRequestor::UDP) {
+ isc_throw(InvalidParameter, "invalid protocol: " << protocol);
+ }
+ request->set("protocol",
+ isc::data::Element::create(protocolString(protocol)));
+ switch (share_mode) {
+ case SocketRequestor::DONT_SHARE:
+ request->set("share_mode", isc::data::Element::create("NO"));
+ break;
+ case SocketRequestor::SHARE_SAME:
+ request->set("share_mode", isc::data::Element::create("SAMEAPP"));
+ break;
+ case SocketRequestor::SHARE_ANY:
+ request->set("share_mode", isc::data::Element::create("ANY"));
+ break;
+ default:
+ isc_throw(InvalidParameter, "invalid share mode: " << share_mode);
+ }
+ request->set("share_name", isc::data::Element::create(share_name));
+
+ return (isc::config::createCommand(REQUEST_SOCKET_COMMAND(), request));
+}
+
+isc::data::ConstElementPtr
+createReleaseSocketMessage(const std::string& token) {
+ const isc::data::ElementPtr release = isc::data::Element::createMap();
+ release->set("token", isc::data::Element::create(token));
+
+ return (isc::config::createCommand(RELEASE_SOCKET_COMMAND(), release));
+}
+
+// Checks and parses the response received from Boss
+// If successful, token and path will be set to the values found in the
+// answer.
+// If the response was an error response, or does not contain the
+// expected elements, a CCSessionError is raised.
+void
+readRequestSocketAnswer(isc::data::ConstElementPtr recv_msg,
+ std::string& token, std::string& path)
+{
+ int rcode;
+ isc::data::ConstElementPtr answer = isc::config::parseAnswer(rcode,
+ recv_msg);
+ // Translate known rcodes to the corresponding exceptions
+ if (rcode == SOCKET_ERROR_CODE) {
+ isc_throw(SocketRequestor::SocketAllocateError, answer->str());
+ }
+ if (rcode == SHARE_ERROR_CODE) {
+ isc_throw(SocketRequestor::ShareError, answer->str());
+ }
+ // The unknown exceptions
+ if (rcode != 0) {
+ isc_throw(isc::config::CCSessionError,
+ "Error response when requesting socket: " << answer->str());
+ }
+
+ if (!answer || !answer->contains("token") || !answer->contains("path")) {
+ isc_throw(isc::config::CCSessionError,
+ "Malformed answer when requesting socket");
+ }
+ token = answer->get("token")->stringValue();
+ path = answer->get("path")->stringValue();
+}
+
+// Connect to the domain socket that has been received from Boss.
+// (i.e. the one that is used to pass created sockets over).
+//
+// This should only be called if the socket had not been connected to
+// already. To get the socket and reuse existing ones, use
+// getFdShareSocket()
+//
+// \param path The domain socket to connect to
+// \exception SocketError if the socket cannot be connected to
+// \return the socket file descriptor
+int
+createFdShareSocket(const std::string& path) {
+ // TODO: Current master has socketsession code and better way
+ // of handling errors without potential leaks for this. It is
+ // not public there at this moment, but when this is merged
+ // we should make a ticket to move this functionality to the
+ // SocketSessionReceiver and use that.
+ const int sock_pass_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock_pass_fd == -1) {
+ isc_throw(SocketRequestor::SocketError,
+ "Unable to open domain socket " << path <<
+ ": " << strerror(errno));
+ }
+ struct sockaddr_un sock_pass_addr;
+ sock_pass_addr.sun_family = AF_UNIX;
+ if (path.size() >= sizeof(sock_pass_addr.sun_path)) {
+ close(sock_pass_fd);
+ isc_throw(SocketRequestor::SocketError,
+ "Unable to open domain socket " << path <<
+ ": path too long");
+ }
+#ifdef HAVE_SA_LEN
+ sock_pass_addr.sun_len = path.size();
+#endif
+ strcpy(sock_pass_addr.sun_path, path.c_str());
+ const socklen_t len = path.size() + offsetof(struct sockaddr_un, sun_path);
+ // Yes, C-style cast bad. See previous comment about SocketSessionReceiver.
+ if (connect(sock_pass_fd, (const struct sockaddr*)&sock_pass_addr,
+ len) == -1) {
+ close(sock_pass_fd);
+ isc_throw(SocketRequestor::SocketError,
+ "Unable to open domain socket " << path <<
+ ": " << strerror(errno));
+ }
+ return (sock_pass_fd);
+}
+
+// Reads a socket fd over the given socket (using recv_fd()).
+//
+// \exception SocketError if the socket cannot be read
+// \return the socket fd that has been read
+int
+getSocketFd(const std::string& token, int sock_pass_fd) {
+ // Tell the boss the socket token.
+ const std::string token_data = token + "\n";
+ if (!isc::util::io::write_data(sock_pass_fd, token_data.c_str(),
+ token_data.size())) {
+ isc_throw(SocketRequestor::SocketError, "Error writing socket token");
+ }
+
+ // Boss first sends some data to signal that getting the socket
+ // from its cache succeeded
+ char status[3]; // We need a space for trailing \0, hence 3
+ memset(status, 0, 3);
+ if (isc::util::io::read_data(sock_pass_fd, status, 2) < 2) {
+ isc_throw(SocketRequestor::SocketError,
+ "Error reading status code while requesting socket");
+ }
+ // Actual status value hardcoded by boss atm.
+ if (CREATOR_SOCKET_UNAVAILABLE() == status) {
+ isc_throw(SocketRequestor::SocketError,
+ "CREATOR_SOCKET_UNAVAILABLE returned");
+ } else if (CREATOR_SOCKET_OK() != status) {
+ isc_throw(SocketRequestor::SocketError,
+ "Unknown status code returned before recv_fd '" << status <<
+ "'");
+ }
+
+ const int passed_sock_fd = isc::util::io::recv_fd(sock_pass_fd);
+
+ // check for error values of passed_sock_fd (see fd_share.h)
+ if (passed_sock_fd < 0) {
+ switch (passed_sock_fd) {
+ case isc::util::io::FD_SYSTEM_ERROR:
+ isc_throw(SocketRequestor::SocketError,
+ "FD_SYSTEM_ERROR while requesting socket");
+ break;
+ case isc::util::io::FD_OTHER_ERROR:
+ isc_throw(SocketRequestor::SocketError,
+ "FD_OTHER_ERROR while requesting socket");
+ break;
+ default:
+ isc_throw(SocketRequestor::SocketError,
+ "Unknown error while requesting socket");
+ }
+ }
+ return (passed_sock_fd);
+}
+
+// This implementation class for SocketRequestor uses
+// a CC session for communication with the boss process,
+// and fd_share to read out the socket(s).
+// Since we only use a reference to the session, it must never
+// be closed during the lifetime of this class
+class SocketRequestorCCSession : public SocketRequestor {
+public:
+ SocketRequestorCCSession(cc::AbstractSession& session,
+ const std::string& app_name) :
+ session_(session),
+ app_name_(app_name)
+ {
+ // We need to filter SIGPIPE to prevent it from happening in
+ // getSocketFd() while writing to the UNIX domain socket after the
+ // remote end closed it. See lib/util/io/socketsession for more
+ // background details.
+ // Note: we should eventually unify this level of details into a single
+ // module. Setting a single filter here should be considered a short
+ // term workaround.
+ if (std::signal(SIGPIPE, SIG_IGN) == SIG_ERR) {
+ isc_throw(Unexpected, "Failed to filter SIGPIPE: " <<
+ strerror(errno));
+ }
+ LOG_DEBUG(logger, DBGLVL_TRACE_BASIC, SOCKETREQUESTOR_CREATED).
+ arg(app_name);
+ }
+
+ ~SocketRequestorCCSession() {
+ closeFdShareSockets();
+ LOG_DEBUG(logger, DBGLVL_TRACE_BASIC, SOCKETREQUESTOR_DESTROYED);
+ }
+
+ virtual SocketID requestSocket(Protocol protocol,
+ const std::string& address,
+ uint16_t port, ShareMode share_mode,
+ const std::string& share_name)
+ {
+ const isc::data::ConstElementPtr request_msg =
+ createRequestSocketMessage(protocol, address, port,
+ share_mode,
+ share_name.empty() ? app_name_ :
+ share_name);
+
+ // Send it to boss
+ const int seq = session_.group_sendmsg(request_msg, "Boss");
+
+ // Get the answer from the boss.
+ // Just do a blocking read, we can't really do much anyway
+ isc::data::ConstElementPtr env, recv_msg;
+ if (!session_.group_recvmsg(env, recv_msg, false, seq)) {
+ isc_throw(isc::config::CCSessionError,
+ "Incomplete response when requesting socket");
+ }
+
+ // Read the socket file from the answer
+ std::string token, path;
+ readRequestSocketAnswer(recv_msg, token, path);
+ // get the domain socket over which we will receive the
+ // real socket
+ const int sock_pass_fd = getFdShareSocket(path);
+
+ // and finally get the socket itself
+ const int passed_sock_fd = getSocketFd(token, sock_pass_fd);
+ LOG_DEBUG(logger, DBGLVL_TRACE_DETAIL, SOCKETREQUESTOR_GETSOCKET).
+ arg(protocolString(protocol)).arg(address).arg(port).
+ arg(passed_sock_fd).arg(token).arg(path);
+ return (SocketID(passed_sock_fd, token));
+ }
+
+ virtual void releaseSocket(const std::string& token) {
+ const isc::data::ConstElementPtr release_msg =
+ createReleaseSocketMessage(token);
+
+ // Send it to boss
+ const int seq = session_.group_sendmsg(release_msg, "Boss");
+ LOG_DEBUG(logger, DBGLVL_TRACE_DETAIL, SOCKETREQUESTOR_RELEASESOCKET).
+ arg(token);
+
+ // Get the answer from the boss.
+ // Just do a blocking read, we can't really do much anyway
+ isc::data::ConstElementPtr env, recv_msg;
+ if (!session_.group_recvmsg(env, recv_msg, false, seq)) {
+ isc_throw(isc::config::CCSessionError,
+ "Incomplete response when sending drop socket command");
+ }
+
+ // Answer should just be success
+ int rcode;
+ isc::data::ConstElementPtr error = isc::config::parseAnswer(rcode,
+ recv_msg);
+ if (rcode != 0) {
+ isc_throw(SocketError,
+ "Error requesting release of socket: " << error->str());
+ }
+ }
+
+private:
+ // Returns the domain socket file descriptor
+ // If we had not opened it yet, opens it now
+ int
+ getFdShareSocket(const std::string& path) {
+ if (fd_share_sockets_.find(path) == fd_share_sockets_.end()) {
+ const int new_fd = createFdShareSocket(path);
+ // Technically, the (creation and) assignment of the new map entry
+ // could thrown an exception and lead to FD leak. This should be
+ // cleaned up later (see comment about SocketSessionReceiver above)
+ fd_share_sockets_[path] = new_fd;
+ return (new_fd);
+ } else {
+ return (fd_share_sockets_[path]);
+ }
+ }
+
+ // Closes the sockets that have been used for fd_share
+ void
+ closeFdShareSockets() {
+ for (std::map<std::string, int>::const_iterator it =
+ fd_share_sockets_.begin();
+ it != fd_share_sockets_.end();
+ ++it) {
+ close((*it).second);
+ }
+ }
+
+ cc::AbstractSession& session_;
+ const std::string app_name_;
+ std::map<std::string, int> fd_share_sockets_;
+};
+
+}
+
+SocketRequestor&
+socketRequestor() {
+ if (requestor != NULL) {
+ return (*requestor);
+ } else {
+ isc_throw(InvalidOperation, "The socket requestor is not initialized");
+ }
+}
+
+void
+initSocketRequestor(cc::AbstractSession& session,
+ const std::string& app_name)
+{
+ if (requestor != NULL) {
+ isc_throw(InvalidOperation,
+ "The socket requestor was already initialized");
+ } else {
+ requestor = new SocketRequestorCCSession(session, app_name);
+ }
+}
+
+void
+initTestSocketRequestor(SocketRequestor* new_requestor) {
+ requestor = new_requestor;
+}
+
+void
+cleanupSocketRequestor() {
+ if (requestor != NULL) {
+ delete requestor;
+ requestor = NULL;
+ } else {
+ isc_throw(InvalidOperation, "The socket requestor is not initialized");
+ }
+}
+
+}
+}
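
For reference, here is a small standalone sketch (not part of the commit) of
the wire format the code above produces: createRequestSocketMessage() boils
down to a "get_socket" command carrying a map of address, port, protocol and
sharing parameters. The literal values below are invented for illustration:

    #include <config/ccsession.h>
    #include <cc/data.h>

    #include <iostream>

    int main() {
        using isc::data::Element;
        const isc::data::ElementPtr req = Element::createMap();
        req->set("address", Element::create("192.0.2.1"));      // made-up address
        req->set("port", Element::create(53));
        req->set("protocol", Element::create("UDP"));
        req->set("share_mode", Element::create("SAMEAPP"));
        req->set("share_name", Element::create("b10-example")); // hypothetical name
        // Prints roughly:
        //   { "command": [ "get_socket", { "address": "192.0.2.1", ... } ] }
        std::cout << isc::config::createCommand("get_socket", req)->str()
                  << std::endl;
        return (0);
    }
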
diff --git a/src/lib/server_common/socket_request.h b/src/lib/server_common/socket_request.h
new file mode 100644
index 0000000..6059978
--- /dev/null
+++ b/src/lib/server_common/socket_request.h
@@ -0,0 +1,282 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SOCKET_REQUEST_H
+#define __SOCKET_REQUEST_H 1
+
+#include <exceptions/exceptions.h>
+
+#include <boost/noncopyable.hpp>
+#include <utility>
+#include <string>
+#include <stdint.h>
+
+namespace isc {
+
+namespace cc {
+class AbstractSession;
+};
+
+namespace server_common {
+
+/// \brief A singleton class for requesting sockets
+///
+/// This class allows requesting sockets from the socket creator.
+///
+/// It is considered to be a singleton - a class which is instantiated
+/// at most once in the whole application. This is because it makes no
+/// sense to have two of them.
+///
+/// This is actually an abstract base class. There'll be one with
+/// hidden implementation and we expect the tests to create their own
+/// subclasses when needed.
+///
+/// \see socketRequestor function to access the object of this class.
+class SocketRequestor : boost::noncopyable {
+protected:
+ /// \brief Protected constructor
+ ///
+ /// The constructor is protected so this class is not instantiated by
+ /// accident (which it can't be anyway, as it has pure virtual methods,
+ /// but just to be sure).
+ SocketRequestor() {}
+
+public:
+ /// \brief virtual destructor
+ ///
+ /// A virtual destructor, as we have virtual methods, to make sure it is
+ /// destroyed by the destructor of the subclass. This shouldn't matter, as
+ /// a singleton class wouldn't get destroyed, but just to be sure.
+ virtual ~ SocketRequestor() {}
+
+ /// \brief A representation of a received socket
+ ///
+ /// The pair holds two parts: the OS-level file descriptor acting as the
+ /// socket (you might want to use it directly with functions like recv,
+ /// or wrap it in an asio socket), and the token representing the socket,
+ /// which allows it to be given up again.
+#ifdef _WIN32
+ typedef std::pair<SOCKET, std::string> SocketID;
+#else
+ typedef std::pair<int, std::string> SocketID;
+#endif
+
+ /// \brief The protocol of requested socket
+ ///
+ /// This describes which protocol the socket should have when created.
+ enum Protocol {
+ UDP,
+ TCP
+ };
+
+ /// \brief The share mode of the requested socket
+ ///
+ /// The socket creator is able to "lend" the same socket to multiple
+ /// applications at once. However, that isn't always what is required. This
+ /// describes the restrictions we want to place on our socket regarding
+ /// sharing. The union of the restrictions of all requests on the given
+ /// socket is taken (so you still might not get your socket even if you
+ /// say SHARE_ANY, because someone else might have already asked for the
+ /// socket with DONT_SHARE).
+ enum ShareMode {
+ DONT_SHARE, //< Request an exclusive ownership of the socket.
+ SHARE_SAME, //< It is possible to share the socket with anybody who
+ //< provided the same share_name.
+ SHARE_ANY //< Any sharing is allowed.
+ };
+
+ /// \brief Exception when we can't manipulate a socket
+ ///
+ /// This is thrown if the other side doesn't want to comply with our
+ /// requests, like when we ask for a socket already held by someone
+ /// else or ask for nonsense (releasing a socket we don't own).
+ class SocketError : public Exception {
+ public:
+ SocketError(const char* file, size_t line, const char* what) :
+ Exception(file, line, what)
+ { }
+ };
+
+ /// \brief Exception when we can't return a requested socket, but we're
+ /// sure we could return others
+ ///
+ /// This is thrown if the requested socket can't be granted, but the problem
+ /// is limited to that one socket; the system as a whole is not broken. This
+ /// exception is a common base class for the concrete exceptions actually
+ /// thrown. You can safely keep using the SocketRequestor after this
+ /// exception (or anything derived from it) is thrown.
+ ///
+ /// \see ShareError
+ /// \see SocketAllocateError
+ class NonFatalSocketError : public SocketError {
+ public:
+ NonFatalSocketError(const char* file, size_t line, const char* what) :
+ SocketError(file, line, what)
+ { }
+ };
+
+ /// \brief Exception when the socket is allocated by other bind10 module
+ /// and it doesn't want to share it.
+ ///
+ /// This is thrown if a socket is requested and the socket is already
+ /// allocated by bind10, but another bind10 module (or modules) is using it
+ /// and the sharing parameters are incompatible (the socket can't be shared
+ /// between that module and ours).
+ class ShareError : public NonFatalSocketError {
+ public:
+ ShareError(const char* file, size_t line, const char* what) :
+ NonFatalSocketError(file, line, what)
+ { }
+ };
+
+ /// \brief Exception when the operating system doesn't allow us to create
+ /// the requested socket.
+ ///
+ /// This happens when the socket() or bind() call fails in the socket
+ /// creator. This can happen when the address/port pair is already taken
+ /// by a different application, the socket creator doesn't have enough
+ /// privileges, or for some kind of similar reason.
+ class SocketAllocateError : public NonFatalSocketError {
+ public:
+ SocketAllocateError(const char* file, size_t line, const char* what) :
+ NonFatalSocketError(file, line, what)
+ { }
+ };
+
+ /// \brief Ask for a socket
+ ///
+ /// Asks the socket creator to give us a socket. The socket will be bound
+ /// to the given address and port.
+ ///
+ /// \param protocol specifies the protocol of the socket. This must be
+ /// either UDP or TCP.
+ /// \param address to which the socket should be bound.
+ /// \param port the port to which the socket should be bound (native endian,
+ /// not network byte order).
+ /// \param share_mode how the socket can be shared with other requests.
+ /// This must be one of the defined values of ShareMode.
+ /// \param share_name the name of the sharing group, relevant for SHARE_SAME
+ /// (whether specified by us or by someone else). If left empty (the
+ /// default), the app_name parameter of initSocketRequestor is used. If
+ /// that one is empty as well, it is accepted, but not recommended, as
+ /// such a non-descriptive name has a high chance of collisions between
+ /// applications. Note that you should provide a name (via share_name
+ /// or app_name) even when you set DONT_SHARE (for logs and debugging),
+ /// and you need to provide one with SHARE_SAME (to define what "same"
+ /// means) and with SHARE_ANY (someone else might request SHARE_SAME,
+ /// which would be checked against this name).
+ /// \return the socket, as a file descriptor and token representing it on
+ /// the socket creator side.
+ ///
+ /// To understand the modes better:
+ /// - If mode is DONT_SHARE, it succeeds if no one else has opened an FD
+ /// for requested protocol, address and port.
+ /// - If mode is SHARE_SAME, it succeeds if all applications who opened an
+ /// FD for the requested protocol, address and port provided the same
+ /// share_name as this one and none of them had mode DONT_SHARE.
+ /// - If mode is SHARE_ANY, it succeeds if no applications who requested
+ /// the same protocol, address and port provided DONT_SHARE and all the
+ /// applications who provided SHARE_SAME also provided the same
+ /// share_name as this process did.
+ ///
+ /// \throw InvalidParameter protocol or share_mode is invalid
+ /// \throw CCSessionError when we have a problem talking over the CC
+ /// session.
+ /// \throw SocketError in case we have some other problems receiving the
+ /// socket (e.g. inconsistency in the protocol, the socket got stuck
+ /// in the transport, etc.). If the exception is not one of the derived
+ /// ones below, it usually means something serious happened.
+ /// \throw SocketAllocateError if the other side can't create the socket.
+ /// \throw ShareError if the socket is used by other bind10 module and
+ /// that one doesn't want to share it with us.
+ virtual SocketID requestSocket(Protocol protocol,
+ const std::string& address,
+ uint16_t port, ShareMode share_mode,
+ const std::string& share_name = "") = 0;
+
+ /// \brief Tell the socket creator we no longer need the socket
+ ///
+ /// Releases the identified socket. This must be called *after*
+ /// the file descriptor was closed on our side. This will allow
+ /// the remote side to either give it to some other application
+ /// or close it, depending on the situation.
+ ///
+ /// \param token the token representing the socket, as received
+ /// in the second part of the requestSocket result.
+ /// \throw CCSessionError when we have a problem talking over the CC
+ /// session.
+ /// \throw SocketError in case the other side doesn't like the
+ /// release (like we're trying to release a socket that doesn't
+ /// belong to us or exist at all).
+ virtual void releaseSocket(const std::string& token) = 0;
+};
+
+/// \brief Access the requestor object.
+///
+/// This returns the singleton object for the Requestor.
+///
+/// \return the active socket requestor object.
+/// \throw InvalidOperation if the object was not yet initialized.
+/// \see SocketRequestor::init to initialize the object.
+SocketRequestor& socketRequestor();
+
+/// \brief Initialize the singleton object
+///
+/// This creates the object that will be used to request sockets.
+/// It can be called only once in the lifetime of the application.
+///
+/// \param session the CC session that'll be used to talk to the
+/// socket creator.
+/// \param app_name default share name if one is not provided with
+/// requestSocket. You can leave this as an empty string,
+/// but then you should provide a reasonably descriptive
+/// name to requestSocket. Empty names work like any others,
+/// but have a high chance of collisions, so it is recommended
+/// to avoid them and provide the name of the application
+/// here.
+/// \throw InvalidOperation when it is called more than once
+void initSocketRequestor(cc::AbstractSession& session,
+ const std::string& app_name);
+
+/// \brief Initialization for tests
+///
+/// This is to support different subclasses in tests. It replaces
+/// the object returned by the socketRequestor() function with the one
+/// provided as a parameter. Ownership is not taken, i.e. it is up to the
+/// caller to delete it when necessary.
+///
+/// This is not to be used in production applications. It is meant as
+/// a replacement for init.
+///
+/// This never throws.
+///
+/// \param requestor the object to be used. It can be NULL to reset to
+/// an "virgin" state (which acts as if initTest or init was never
+/// called before).
+void initTestSocketRequestor(SocketRequestor* requestor);
+
+/// \brief Destroy the singleton instance
+///
+/// Calling this function is not strictly necessary; the socket
+/// requestor is a singleton anyway. However, for some tests it
+/// is useful to destroy and recreate it, as well as for programs
+/// that want to be completely clean on exit.
+/// After this function has been called, all operations except init
+/// will fail.
+void cleanupSocketRequestor();
+
+}
+}
+
+#endif // __SOCKET_REQUEST_H
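
A minimal usage sketch of the interface declared above, assuming
initSocketRequestor() has already been called by the application; the
address, port and share name are illustrative and not taken from the commit:

    #include <server_common/socket_request.h>

    #include <unistd.h>   // for close()

    void
    acquireAndReleaseOnce() {
        using isc::server_common::SocketRequestor;
        using isc::server_common::socketRequestor;

        // Ask the socket creator (through boss) for a UDP socket bound to
        // ::1 port 5300, sharable with other requests using the same name.
        const SocketRequestor::SocketID id =
            socketRequestor().requestSocket(SocketRequestor::UDP, "::1", 5300,
                                            SocketRequestor::SHARE_SAME,
                                            "b10-example"); // hypothetical name
        // id.first is the file descriptor, id.second the token used to
        // give the socket back.

        // Per the documentation above, close our copy of the FD first,
        // then tell the creator we no longer need the socket.
        close(id.first);
        socketRequestor().releaseSocket(id.second);
    }
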
diff --git a/src/lib/server_common/tests/.gitignore b/src/lib/server_common/tests/.gitignore
new file mode 100644
index 0000000..0749d37
--- /dev/null
+++ b/src/lib/server_common/tests/.gitignore
@@ -0,0 +1,2 @@
+/data_path.h
+/run_unittests
diff --git a/src/lib/server_common/tests/Makefile.am b/src/lib/server_common/tests/Makefile.am
index d7e113a..d6498f1 100644
--- a/src/lib/server_common/tests/Makefile.am
+++ b/src/lib/server_common/tests/Makefile.am
@@ -22,6 +22,9 @@ endif
CLEANFILES = *.gcno *.gcda
+TESTS_ENVIRONMENT = \
+ $(LIBTOOL) --mode=execute $(VALGRIND_COMMAND)
+
TESTS =
if HAVE_GTEST
TESTS += run_unittests
@@ -29,6 +32,7 @@ run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += client_unittest.cc
run_unittests_SOURCES += portconfig_unittest.cc
run_unittests_SOURCES += keyring_test.cc
+run_unittests_SOURCES += socket_requestor_test.cc
nodist_run_unittests_SOURCES = data_path.h
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
diff --git a/src/lib/server_common/tests/client_unittest.cc b/src/lib/server_common/tests/client_unittest.cc
index 1ec0081..b308ef7 100644
--- a/src/lib/server_common/tests/client_unittest.cc
+++ b/src/lib/server_common/tests/client_unittest.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#ifndef _WIN32
+#include <sys/types.h>
#include <sys/socket.h>
#endif
#include <string.h>
diff --git a/src/lib/server_common/tests/portconfig_unittest.cc b/src/lib/server_common/tests/portconfig_unittest.cc
index 0c77c9b..9cb0d79 100644
--- a/src/lib/server_common/tests/portconfig_unittest.cc
+++ b/src/lib/server_common/tests/portconfig_unittest.cc
@@ -19,22 +19,30 @@
#include <mswsock.h>
#endif
+#include <gtest/gtest.h>
+
#include <server_common/portconfig.h>
+#include <testutils/socket_request.h>
+#include <testutils/mockups.h>
+
+#include <util/unittests/resource.h>
#include <cc/data.h>
#include <exceptions/exceptions.h>
#include <asiolink/asiolink.h>
#include <asiodns/asiodns.h>
-#include <gtest/gtest.h>
#include <string>
using namespace isc::server_common::portconfig;
+using namespace isc::server_common;
using namespace isc::data;
using namespace isc;
using namespace std;
using namespace isc::asiolink;
using namespace isc::asiodns;
+using namespace isc::testutils;
+using boost::lexical_cast;
namespace {
@@ -136,26 +144,31 @@ TEST_F(ParseAddresses, invalid) {
// Test fixture for installListenAddresses
struct InstallListenAddresses : public ::testing::Test {
InstallListenAddresses() :
- dnss_(ios_, NULL, NULL, NULL)
+ // The empty string is the expected parameter of requestSocket,
+ // not app_name - the request does not fall back to it; it
+ // is checked to be the same.
+ sock_requestor_(dnss_, store_, 5288, "")
{
valid_.push_back(AddressPair("127.0.0.1", 5288));
valid_.push_back(AddressPair("::1", 5288));
+ invalid_.push_back(AddressPair("127.0.0.1", 5288));
invalid_.push_back(AddressPair("192.0.2.2", 1));
}
- IOService ios_;
- DNSService dnss_;
+ MockDNSService dnss_;
AddressList store_;
+ isc::testutils::TestSocketRequestor sock_requestor_;
// We should be able to bind to these addresses
AddressList valid_;
// But this shouldn't work
AddressList invalid_;
// Check that the store_ addresses are the same as expected
- void checkAddresses(const AddressList& expected, const string& name) {
+ void checkAddresses(const AddressList& expected, const string& name) const
+ {
SCOPED_TRACE(name);
ASSERT_EQ(expected.size(), store_.size()) <<
"Different amount of elements, not checking content";
- // Run in parallel trough the vectors
+ // Run in parallel through the vectors
for (AddressList::const_iterator ei(expected.begin()),
si(store_.begin()); ei != expected.end(); ++ei, ++si) {
EXPECT_EQ(ei->first, si->first);
@@ -165,17 +178,46 @@ struct InstallListenAddresses : public ::testing::Test {
};
// Try switching valid addresses
+// Check the sockets are correctly requested and returned
TEST_F(InstallListenAddresses, valid) {
// First, bind to the valid addresses
EXPECT_NO_THROW(installListenAddresses(valid_, store_, dnss_));
checkAddresses(valid_, "Valid addresses");
+ const char* tokens1[] = {
+ "TCP:127.0.0.1:5288:1",
+ "UDP:127.0.0.1:5288:2",
+ "TCP:::1:5288:3",
+ "UDP:::1:5288:4",
+ NULL
+ };
+ const char* no_tokens[] = { NULL };
+ sock_requestor_.checkTokens(tokens1, sock_requestor_.given_tokens_,
+ "Valid given tokens 1");
+ sock_requestor_.checkTokens(no_tokens, sock_requestor_.released_tokens_,
+ "Valid no released tokens 1");
// TODO Maybe some test to actually connect to them
// Try setting it back to nothing
+ sock_requestor_.given_tokens_.clear();
EXPECT_NO_THROW(installListenAddresses(AddressList(), store_, dnss_));
checkAddresses(AddressList(), "No addresses");
+ sock_requestor_.checkTokens(no_tokens, sock_requestor_.given_tokens_,
+ "Valid no given tokens");
+ sock_requestor_.checkTokens(tokens1, sock_requestor_.released_tokens_,
+ "Valid released tokens");
// Try switching back again
EXPECT_NO_THROW(installListenAddresses(valid_, store_, dnss_));
checkAddresses(valid_, "Valid addresses");
+ const char* tokens2[] = {
+ "TCP:127.0.0.1:5288:5",
+ "UDP:127.0.0.1:5288:6",
+ "TCP:::1:5288:7",
+ "UDP:::1:5288:8",
+ NULL
+ };
+ sock_requestor_.checkTokens(tokens2, sock_requestor_.given_tokens_,
+ "Valid given tokens 2");
+ sock_requestor_.checkTokens(tokens1, sock_requestor_.released_tokens_,
+ "Valid released tokens");
}
// Try if rollback works
@@ -183,9 +225,138 @@ TEST_F(InstallListenAddresses, rollback) {
// Set some addresses
EXPECT_NO_THROW(installListenAddresses(valid_, store_, dnss_));
checkAddresses(valid_, "Before rollback");
+ const char* tokens1[] = {
+ "TCP:127.0.0.1:5288:1",
+ "UDP:127.0.0.1:5288:2",
+ "TCP:::1:5288:3",
+ "UDP:::1:5288:4",
+ NULL
+ };
+ const char* no_tokens[] = { NULL };
+ sock_requestor_.checkTokens(tokens1, sock_requestor_.given_tokens_,
+ "Given before rollback");
+ sock_requestor_.checkTokens(no_tokens, sock_requestor_.released_tokens_,
+ "Released before rollback");
+ sock_requestor_.given_tokens_.clear();
// This should not bind them, but should leave the original addresses
- EXPECT_THROW(installListenAddresses(invalid_, store_, dnss_), exception);
+ EXPECT_THROW(installListenAddresses(invalid_, store_, dnss_),
+ SocketRequestor::SocketError);
checkAddresses(valid_, "After rollback");
+ // Now, it should have requested the first pair of sockets from the invalids
+ // and, as the second failed, it should have returned them right away.
+ const char* released1[] = {
+ "TCP:127.0.0.1:5288:1",
+ "UDP:127.0.0.1:5288:2",
+ "TCP:::1:5288:3",
+ "UDP:::1:5288:4",
+ "TCP:127.0.0.1:5288:5",
+ "UDP:127.0.0.1:5288:6",
+ NULL
+ };
+ // It should request the first pair of sockets, and then request the
+ // complete set of valid addresses to rollback
+ const char* tokens2[] = {
+ "TCP:127.0.0.1:5288:5",
+ "UDP:127.0.0.1:5288:6",
+ "TCP:127.0.0.1:5288:7",
+ "UDP:127.0.0.1:5288:8",
+ "TCP:::1:5288:9",
+ "UDP:::1:5288:10",
+ NULL
+ };
+ sock_requestor_.checkTokens(tokens2, sock_requestor_.given_tokens_,
+ "Given after rollback");
+ sock_requestor_.checkTokens(released1, sock_requestor_.released_tokens_,
+ "Released after rollback");
+}
+
+// Check that it at least releases everything when even the rollback fails.
+TEST_F(InstallListenAddresses, brokenRollback) {
+ EXPECT_NO_THROW(installListenAddresses(valid_, store_, dnss_));
+ checkAddresses(valid_, "Before rollback");
+ // Don't check the tokens now, we already do it in rollback and valid tests
+ sock_requestor_.given_tokens_.clear();
+ sock_requestor_.break_rollback_ = true;
+ EXPECT_THROW(installListenAddresses(invalid_, store_, dnss_),
+ SocketRequestor::NonFatalSocketError);
+ // No addresses here
+ EXPECT_TRUE(store_.empty());
+ // The first pair should be requested in the first part of the failure to
+ // bind and the second pair in the first part of rollback
+ const char* tokens[] = {
+ "TCP:127.0.0.1:5288:5",
+ "UDP:127.0.0.1:5288:6",
+ "TCP:127.0.0.1:5288:7",
+ "UDP:127.0.0.1:5288:8",
+ NULL
+ };
+ // The first set should be released, as well as all the ones we request now
+ const char* released[] = {
+ "TCP:127.0.0.1:5288:1",
+ "UDP:127.0.0.1:5288:2",
+ "TCP:::1:5288:3",
+ "UDP:::1:5288:4",
+ "TCP:127.0.0.1:5288:5",
+ "UDP:127.0.0.1:5288:6",
+ "TCP:127.0.0.1:5288:7",
+ "UDP:127.0.0.1:5288:8",
+ NULL
+ };
+ sock_requestor_.checkTokens(tokens, sock_requestor_.given_tokens_,
+ "given");
+ sock_requestor_.checkTokens(released, sock_requestor_.released_tokens_,
+ "released");
+}
+
+// Make sure the death tests are filterable away.
+typedef InstallListenAddresses InstallListenAddressesDeathTest;
+
+// There are systems which don't have EXPECT_DEATH. We skip the tests there.
+// We're lucky, EXPECT_DEATH is a macro, so we can test for its existence this
+// easily.
+#if defined(EXPECT_DEATH) && !defined(NO_EXPECT_DEATH)
+// We make the socket requestor throw a "fatal" exception, one where we can't be
+// sure the state between processes is consistent. So we abort in that case.
+TEST_F(InstallListenAddressesDeathTest, inconsistent) {
+ AddressList deathAddresses;
+ deathAddresses.push_back(AddressPair("192.0.2.3", 5288));
+ // Make sure it actually kills the application (there should be an abort
+ // in this case)
+ EXPECT_DEATH({
+ isc::util::unittests::dontCreateCoreDumps();
+
+ try {
+ installListenAddresses(deathAddresses, store_, dnss_);
+ } catch (...) {
+ // Prevent exceptions killing the application, we need
+ // to make sure it dies the real hard way
+ };
+ }, "");
+}
+
+// If we are unable to tell the boss we closed a socket, we abort, as we are
+// most probably no longer consistent with the boss.
+TEST_F(InstallListenAddressesDeathTest, cantClose) {
+ installListenAddresses(valid_, store_, dnss_);
+ AddressList empty;
+ // Instruct it to fail on close
+ sock_requestor_.break_release_ = true;
+ EXPECT_DEATH({
+ isc::util::unittests::dontCreateCoreDumps();
+
+ try {
+ // Setting to empty will close all current sockets.
+ // And thanks to the break_release_, the close will
+ // throw, which will make it crash.
+ installListenAddresses(empty, store_, dnss_);
+ } catch (...) {
+ // To make sure it is killed by abort, not by some
+ // (unhandled) exception
+ };
+ }, "");
+ // And reset it back, so it can safely clean up itself.
+ sock_requestor_.break_release_ = false;
}
+#endif // EXPECT_DEATH
}
diff --git a/src/lib/server_common/tests/socket_requestor_test.cc b/src/lib/server_common/tests/socket_requestor_test.cc
new file mode 100644
index 0000000..9adf84d
--- /dev/null
+++ b/src/lib/server_common/tests/socket_requestor_test.cc
@@ -0,0 +1,589 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <server_common/socket_request.h>
+
+#include <gtest/gtest.h>
+
+#include <config/tests/fake_session.h>
+#include <config/ccsession.h>
+#include <exceptions/exceptions.h>
+
+#include <server_common/tests/data_path.h>
+
+#include <cstdlib>
+#include <cstddef>
+#include <cerrno>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include <boost/foreach.hpp>
+#include <boost/scoped_ptr.hpp>
+
+#include <util/io/fd.h>
+#include <util/io/fd_share.h>
+
+using namespace isc::data;
+using namespace isc::config;
+using namespace isc::server_common;
+using namespace isc;
+
+namespace {
+
+// Check it throws an exception when it is not initialized
+TEST(SocketRequestorAccess, uninitialized) {
+ // Make sure it is not initialized
+ initTestSocketRequestor(NULL);
+ EXPECT_THROW(socketRequestor(), InvalidOperation);
+}
+
+// It returns whatever it is initialized to
+TEST(SocketRequestorAccess, initialized) {
+ // A concrete implementation that does nothing, just can exist
+ class DummyRequestor : public SocketRequestor {
+ public:
+ DummyRequestor() : SocketRequestor() {}
+ virtual void releaseSocket(const std::string&) {}
+ virtual SocketID requestSocket(Protocol, const std::string&, uint16_t,
+ ShareMode, const std::string&)
+ {
+ return (SocketID(0, "")); // Just to silence warnings
+ }
+ };
+ DummyRequestor requestor;
+ // Make sure it is initialized (the test way, of course)
+ initTestSocketRequestor(&requestor);
+ // It returns the same "pointer" as inserted.
+ // The casts are there because the template system seemed to get confused
+ // without them; the types should be correct even without them, but
+ // EXPECT_EQ wanted to use long long int instead of pointers.
+ EXPECT_EQ(static_cast<const SocketRequestor*>(&requestor),
+ static_cast<const SocketRequestor*>(&socketRequestor()));
+ // Just that we don't have an invalid pointer anyway
+ initTestSocketRequestor(NULL);
+}
+
+// This class contains a fake (module)ccsession to emulate answers from Boss
+class SocketRequestorTest : public ::testing::Test {
+public:
+ SocketRequestorTest() : session(ElementPtr(new ListElement),
+ ElementPtr(new ListElement),
+ ElementPtr(new ListElement))
+ {
+ initSocketRequestor(session, "tests");
+ }
+
+ ~SocketRequestorTest() {
+ cleanupSocketRequestor();
+ }
+
+ // Do a standard request with some default values
+ SocketRequestor::SocketID
+ doRequest() {
+ return (socketRequestor().requestSocket(SocketRequestor::UDP,
+ "192.0.2.1", 12345,
+ SocketRequestor::DONT_SHARE,
+ "test"));
+ }
+
+ // Creates a valid socket request answer, as it would be sent by
+ // Boss. 'valid' in terms of format, not values
+ void
+ addAnswer(const std::string& token, const std::string& path) {
+ ElementPtr answer_part = Element::createMap();
+ answer_part->set("token", Element::create(token));
+ answer_part->set("path", Element::create(path));
+ session.getMessages()->add(createAnswer(0, answer_part));
+ }
+
+ // Clears the messages the client sent so far on the fake msgq
+ // (for easier access to new messages later)
+ void
+ clearMsgQueue() {
+ while (session.getMsgQueue()->size() > 0) {
+ session.getMsgQueue()->remove(0);
+ }
+ }
+
+ isc::cc::FakeSession session;
+ const std::string specfile;
+};
+
+// helper function to create the request packet as we expect the
+// socket requestor to send
+ConstElementPtr
+createExpectedRequest(const std::string& address,
+ int port,
+ const std::string& protocol,
+ const std::string& share_mode,
+ const std::string& share_name)
+{
+ // create command arguments
+ const ElementPtr command_args = Element::createMap();
+ command_args->set("address", Element::create(address));
+ command_args->set("port", Element::create(port));
+ command_args->set("protocol", Element::create(protocol));
+ command_args->set("share_mode", Element::create(share_mode));
+ command_args->set("share_name", Element::create(share_name));
+
+ // create the envelope
+ const ElementPtr packet = Element::createList();
+ packet->add(Element::create("Boss"));
+ packet->add(Element::create("*"));
+ packet->add(createCommand("get_socket", command_args));
+ packet->add(Element::create(-1));
+
+ return (packet);
+}
+
+TEST_F(SocketRequestorTest, testSocketRequestMessages) {
+ // For each request, it will raise CCSessionError, since we don't
+ // answer here.
+ // We are only testing the request messages that are sent,
+ // so for this test that is no problem
+ clearMsgQueue();
+ ConstElementPtr expected_request;
+
+ expected_request = createExpectedRequest("192.0.2.1", 12345, "UDP",
+ "NO", "test");
+ EXPECT_THROW(socketRequestor().requestSocket(SocketRequestor::UDP,
+ "192.0.2.1", 12345,
+ SocketRequestor::DONT_SHARE,
+ "test"),
+ CCSessionError);
+ ASSERT_EQ(1, session.getMsgQueue()->size());
+ EXPECT_EQ(*expected_request, *(session.getMsgQueue()->get(0)));
+
+ clearMsgQueue();
+ expected_request = createExpectedRequest("192.0.2.2", 1, "TCP",
+ "ANY", "test2");
+ EXPECT_THROW(socketRequestor().requestSocket(SocketRequestor::TCP,
+ "192.0.2.2", 1,
+ SocketRequestor::SHARE_ANY,
+ "test2"),
+ CCSessionError);
+ ASSERT_EQ(1, session.getMsgQueue()->size());
+ EXPECT_EQ(*expected_request, *(session.getMsgQueue()->get(0)));
+
+ clearMsgQueue();
+ expected_request = createExpectedRequest("::1", 2, "UDP",
+ "SAMEAPP", "test3");
+ EXPECT_THROW(socketRequestor().requestSocket(SocketRequestor::UDP,
+ "::1", 2,
+ SocketRequestor::SHARE_SAME,
+ "test3"),
+ CCSessionError);
+ ASSERT_EQ(1, session.getMsgQueue()->size());
+ EXPECT_EQ(*expected_request, *(session.getMsgQueue()->get(0)));
+
+ // A default share name equal to the app name passed on construction
+ clearMsgQueue();
+ expected_request = createExpectedRequest("::1", 2, "UDP",
+ "SAMEAPP", "tests");
+ EXPECT_THROW(socketRequestor().requestSocket(SocketRequestor::UDP,
+ "::1", 2,
+ SocketRequestor::SHARE_SAME),
+ CCSessionError);
+ ASSERT_EQ(1, session.getMsgQueue()->size());
+ EXPECT_EQ(*expected_request, *(session.getMsgQueue()->get(0)));
+}
+
+TEST_F(SocketRequestorTest, invalidParameterForSocketRequest) {
+ // Bad protocol
+ EXPECT_THROW(socketRequestor().
+ requestSocket(static_cast<SocketRequestor::Protocol>(2),
+ "192.0.2.1", 12345,
+ SocketRequestor::DONT_SHARE,
+ "test"),
+ InvalidParameter);
+
+ // Bad share mode
+ EXPECT_THROW(socketRequestor().
+ requestSocket(SocketRequestor::UDP,
+ "192.0.2.1", 12345,
+ static_cast<SocketRequestor::ShareMode>(3),
+ "test"),
+ InvalidParameter);
+}
+
+TEST_F(SocketRequestorTest, testBadRequestAnswers) {
+ // Test various scenarios where the requestor gets back bad answers
+
+ // Should raise CCSessionError if there is no answer
+ EXPECT_THROW(doRequest(), CCSessionError);
+
+ // Also if the answer does not match the format
+ session.getMessages()->add(createAnswer());
+ EXPECT_THROW(doRequest(), CCSessionError);
+
+ // Now a 'real' answer, should fail on socket connect (no such file)
+ addAnswer("foo", "/does/not/exist");
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketError);
+
+ // Another failure (domain socket path too long)
+ addAnswer("foo", std::string(1000, 'x'));
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketError);
+
+ // Test values around path boundary
+ struct sockaddr_un sock_un;
+ const std::string max_len(sizeof(sock_un.sun_path) - 1, 'x');
+ addAnswer("foo", max_len);
+ // The failure should NOT contain 'too long'
+ // (explicitly checking for the nonexistence of 'too long', as opposed to
+ // the actual error, since 'too long' is a value we set).
+ try {
+ doRequest();
+ FAIL() << "doRequest did not throw an exception";
+ } catch (const SocketRequestor::SocketError& se) {
+ EXPECT_EQ(std::string::npos, std::string(se.what()).find("too long"));
+ }
+
+ const std::string too_long(sizeof(sock_un.sun_path), 'x');
+ addAnswer("foo", too_long);
+ // The failure SHOULD contain 'too long'
+ try {
+ doRequest();
+ FAIL() << "doRequest did not throw an exception";
+ } catch (const SocketRequestor::SocketError& se) {
+ EXPECT_NE(std::string::npos, std::string(se.what()).find("too long"));
+ }
+
+ // Send back an error response
+ // A generic one first
+ session.getMessages()->add(createAnswer(1, "error"));
+ EXPECT_THROW(doRequest(), CCSessionError);
+ // Now some with specific exceptions
+ session.getMessages()->add(createAnswer(2, "error"));
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketAllocateError);
+ session.getMessages()->add(createAnswer(3, "error"));
+ EXPECT_THROW(doRequest(), SocketRequestor::ShareError);
+}
+
+// Helper function to create the release commands as we expect
+// them to be sent by the SocketRequestor class
+ConstElementPtr
+createExpectedRelease(const std::string& token) {
+ // create command arguments
+ const ElementPtr command_args = Element::createMap();
+ command_args->set("token", Element::create(token));
+
+ // create the envelope
+ const ElementPtr packet = Element::createList();
+ packet->add(Element::create("Boss"));
+ packet->add(Element::create("*"));
+ packet->add(createCommand("drop_socket", command_args));
+ packet->add(Element::create(-1));
+
+ return (packet);
+}
+
+TEST_F(SocketRequestorTest, testSocketReleaseMessages) {
+ ConstElementPtr expected_release;
+
+ session.getMessages()->add(createAnswer());
+
+ clearMsgQueue();
+ expected_release = createExpectedRelease("foo");
+ socketRequestor().releaseSocket("foo");
+ ASSERT_EQ(1, session.getMsgQueue()->size());
+ EXPECT_EQ(*expected_release, *(session.getMsgQueue()->get(0)));
+
+ session.getMessages()->add(createAnswer());
+ clearMsgQueue();
+ expected_release = createExpectedRelease("bar");
+ socketRequestor().releaseSocket("bar");
+ ASSERT_EQ(1, session.getMsgQueue()->size());
+ EXPECT_EQ(*expected_release, *(session.getMsgQueue()->get(0)));
+}
+
+TEST_F(SocketRequestorTest, testBadSocketReleaseAnswers) {
+ // Should fail if there is no answer at all
+ EXPECT_THROW(socketRequestor().releaseSocket("bar"),
+ CCSessionError);
+
+ // Should also fail if the answer is an error
+ session.getMessages()->add(createAnswer(1, "error"));
+ EXPECT_THROW(socketRequestor().releaseSocket("bar"),
+ SocketRequestor::SocketError);
+}
+
+// A helper function to impose a read timeout for the server socket
+// in order to avoid deadlock when the client side has a bug and doesn't
+// send expected data.
+// It returns true when the timeout is set successfully; otherwise false.
+bool
+setRecvTimo(int s) {
+ const struct timeval timeo = { 10, 0 }; // 10sec, arbitrary choice
+ if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)) == 0) {
+ return (true);
+ }
+ if (errno == ENOPROTOOPT) { // deviant OS, give up using it.
+ return (false);
+ }
+ isc_throw(isc::Unexpected, "set RCVTIMEO failed: " << strerror(errno));
+}
+
+// Helper test class that creates a randomly named domain socket.
+// Upon init, it will only reserve the name (and place an empty file in its
+// place).
+// When run() is called, it creates the socket, forks, and the child will
+// listen for a connection, send all the data passed to run() to that
+// connection, and then close the socket.
+class TestSocket {
+public:
+ TestSocket() : fd_(-1) {
+ path_ = strdup("test_socket.XXXXXX");
+ // Misuse mkstemp to generate a file name.
+ const int f = mkstemp(path_);
+ if (f == -1) {
+ isc_throw(Unexpected, "mkstemp failed: " << strerror(errno));
+ }
+ // Just need the name, so immediately close
+ close(f);
+ }
+
+ ~TestSocket() {
+ cleanup();
+ }
+
+ void
+ cleanup() {
+ unlink(path_);
+ if (path_ != NULL) {
+ free(path_);
+ path_ = NULL;
+ }
+ if (fd_ != -1) {
+ close(fd_);
+ fd_ = -1;
+ }
+ }
+
+ // Returns the path used for the socket
+ const char* getPath() const {
+ return (path_);
+ }
+
+ // Create the socket, fork, and serve if child (the child will exit when
+ // done). If the underlying system doesn't allow setting a read timeout,
+ // tell the caller via a false return value so that the caller can avoid
+ // performing tests that could result in a deadlock.
+ bool run(const std::vector<std::pair<std::string, int> >& data) {
+ create();
+ const bool timo_ok = setRecvTimo(fd_);
+ const int child_pid = fork();
+ if (child_pid == 0) {
+ serve(data);
+ exit(0);
+ } else {
+ // parent does not need fd anymore
+ close(fd_);
+ fd_ = -1;
+ }
+ return (timo_ok);
+ }
+private:
+ // Actually create the socket and listen on it
+ void
+ create() {
+ fd_ = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd_ == -1) {
+ isc_throw(Unexpected, "Unable to create socket");
+ }
+ struct sockaddr_un socket_address;
+ socket_address.sun_family = AF_UNIX;
+ socklen_t len = strlen(path_);
+ if (len > sizeof(socket_address.sun_path)) {
+ isc_throw(Unexpected,
+ "mkstemp() created a filename too long for sun_path");
+ }
+ strncpy(socket_address.sun_path, path_, len);
+#ifdef HAVE_SA_LEN
+ socket_address.sun_len = len;
+#endif
+
+ len += offsetof(struct sockaddr_un, sun_path);
+ // Remove the random file we created so we can reuse it for
+ // a domain socket connection. This contains a minor race condition
+ // but for the purposes of this test it should be small enough
+ unlink(path_);
+ if (bind(fd_, (const struct sockaddr*)&socket_address, len) == -1) {
+ isc_throw(Unexpected,
+ "unable to bind to test domain socket " << path_ <<
+ ": " << strerror(errno));
+ }
+
+ if (listen(fd_, 1) == -1) {
+ isc_throw(Unexpected,
+ "unable to listen on test domain socket " << path_ <<
+ ": " << strerror(errno));
+ }
+ }
+
+ // Accept one connection, then for each value of the vector,
+ // read the socket token from the connection and match the string
+ // part of the vector element, and send the integer part of the element
+ // using send_fd() (prepended by a status code 'ok'). For simplicity
+ // we assume the tokens are 4 bytes long; if the test case uses a
+ // different size of token the test will fail.
+ //
+ // There are a few specific exceptions:
+ // when the value is -1, it will send back an error value (signaling
+ // CREATOR_SOCKET_UNAVAILABLE)
+ // when the value is -2, it will send a byte signaling CREATOR_SOCKET_OK
+ // first, and then one byte from some string (i.e. bad data, not using
+ // send_fd())
+ //
+ // NOTE: client_fd could leak on exception. This should be cleaned up.
+ // See the note about SocketSessionReceiver in socket_request.cc.
+ void
+ serve(const std::vector<std::pair<std::string, int> > data) {
+ const int client_fd = accept(fd_, NULL, NULL);
+ if (client_fd == -1) {
+ isc_throw(Unexpected, "Error in accept(): " << strerror(errno));
+ }
+ if (!setRecvTimo(client_fd)) {
+ // In the loop below we do blocking read. To avoid deadlock
+ // when the parent is buggy we'll skip it unless we can
+ // set a read timeout on the socket.
+ return;
+ }
+ typedef std::pair<std::string, int> DataPair;
+ BOOST_FOREACH(DataPair cur_data, data) {
+ char buf[5];
+ memset(buf, 0, 5);
+ if (isc::util::io::read_data(client_fd, buf, 4) != 4) {
+ isc_throw(Unexpected, "unable to receive socket token");
+ }
+ if (cur_data.first != buf) {
+ isc_throw(Unexpected, "socket token mismatch: expected="
+ << cur_data.first << ", actual=" << buf);
+ }
+
+ bool result;
+ if (cur_data.second == -1) {
+ // send 'CREATOR_SOCKET_UNAVAILABLE'
+ result = isc::util::io::write_data(client_fd, "0\n", 2);
+ } else if (cur_data.second == -2) {
+ // send 'CREATOR_SOCKET_OK' first
+ result = isc::util::io::write_data(client_fd, "1\n", 2);
+ if (result) {
+ if (send(client_fd, "a", 1, 0) != 1) {
+ result = false;
+ }
+ }
+ } else {
+ // send 'CREATOR_SOCKET_OK' first
+ result = isc::util::io::write_data(client_fd, "1\n", 2);
+ if (result) {
+ if (isc::util::io::send_fd(client_fd,
+ cur_data.second) != 0) {
+ result = false;
+ }
+ }
+ }
+ if (!result) {
+ isc_throw(Exception, "Error in send_fd(): " <<
+ strerror(errno));
+ }
+ }
+ close(client_fd);
+ }
+
+ int fd_;
+ char* path_;
+};
+
+TEST_F(SocketRequestorTest, testSocketPassing) {
+ TestSocket ts;
+ std::vector<std::pair<std::string, int> > data;
+ data.push_back(std::pair<std::string, int>("foo\n", 1));
+ data.push_back(std::pair<std::string, int>("bar\n", 2));
+ data.push_back(std::pair<std::string, int>("foo\n", 3));
+ data.push_back(std::pair<std::string, int>("foo\n", 1));
+ data.push_back(std::pair<std::string, int>("foo\n", -1));
+ data.push_back(std::pair<std::string, int>("foo\n", -2));
+
+ // run() returns true iff we can specify a read timeout, which we use to
+ // avoid a deadlock. Unless there's a bug the test should succeed even
+ // without the timeout, but we don't want the test to hang in case of an
+ // unexpected bug, so we'd rather skip most of the tests in that case.
+ const bool timo_ok = ts.run(data);
+ SocketRequestor::SocketID socket_id;
+ if (timo_ok) {
+ // 1 should be ok
+ addAnswer("foo", ts.getPath());
+ socket_id = doRequest();
+ EXPECT_EQ("foo", socket_id.second);
+ EXPECT_EQ(0, close(socket_id.first));
+
+ // 2 should be ok too
+ addAnswer("bar", ts.getPath());
+ socket_id = doRequest();
+ EXPECT_EQ("bar", socket_id.second);
+ EXPECT_EQ(0, close(socket_id.first));
+
+ // 3 should be ok too (reuse earlier token)
+ addAnswer("foo", ts.getPath());
+ socket_id = doRequest();
+ EXPECT_EQ("foo", socket_id.second);
+ EXPECT_EQ(0, close(socket_id.first));
+ }
+
+ // Create a second socket server, to test that multiple different
+ // domain sockets would work as well (even though we don't actually
+ // use that feature)
+ TestSocket ts2;
+ std::vector<std::pair<std::string, int> > data2;
+ data2.push_back(std::pair<std::string, int>("foo\n", 1));
+ const bool timo_ok2 = ts2.run(data2);
+
+ if (timo_ok2) {
+ // 1 should be ok
+ addAnswer("foo", ts2.getPath());
+ socket_id = doRequest();
+ EXPECT_EQ("foo", socket_id.second);
+ EXPECT_EQ(0, close(socket_id.first));
+ }
+
+ if (timo_ok) {
+ // Now use first socket again
+ addAnswer("foo", ts.getPath());
+ socket_id = doRequest();
+ EXPECT_EQ("foo", socket_id.second);
+ EXPECT_EQ(0, close(socket_id.first));
+
+ // -1 is a "normal" error
+ addAnswer("foo", ts.getPath());
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketError);
+
+ // -2 is an unexpected error. After this point it's not guaranteed the
+ // connection works as intended.
+ addAnswer("foo", ts.getPath());
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketError);
+ }
+
+ // The vector of the first socket is now empty, so the socket should be gone
+ addAnswer("foo", ts.getPath());
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketError);
+
+ // The vector of the second socket is now empty too, so the socket should
+ // be gone
+ addAnswer("foo", ts2.getPath());
+ EXPECT_THROW(doRequest(), SocketRequestor::SocketError);
+}
+
+}
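
The following is an illustrative sketch, not part of the commit, of what the
requesting side of the socket-passing protocol emulated by TestSocket::serve()
above roughly looks like: connect to the UNIX domain socket, write the token,
read the two-byte status ("1\n" for CREATOR_SOCKET_OK, "0\n" for
CREATOR_SOCKET_UNAVAILABLE), and on success receive the descriptor. The helper
name receiveSocketSketch is made up, and isc::util::io::recv_fd() is assumed
to be the receiving counterpart of the send_fd() call used above; the real
client code lives in socket_request.cc and may differ.

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>
    #include <cstring>
    #include <string>

    #include <util/io/fd.h>
    #include <util/io/fd_share.h>

    // Hypothetical helper: returns the received descriptor, or -1 on failure.
    int
    receiveSocketSketch(const std::string& path, const std::string& token) {
        const int sock = socket(AF_UNIX, SOCK_STREAM, 0);
        if (sock == -1) {
            return (-1);
        }
        struct sockaddr_un addr;
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        strncpy(addr.sun_path, path.c_str(), sizeof(addr.sun_path) - 1);
        if (connect(sock, (const struct sockaddr*)&addr, sizeof(addr)) == -1 ||
            !isc::util::io::write_data(sock, token.c_str(), token.size())) {
            close(sock);
            return (-1);
        }
        char status[2];
        if (isc::util::io::read_data(sock, status, 2) != 2 || status[0] != '1') {
            close(sock);          // "0\n" means CREATOR_SOCKET_UNAVAILABLE
            return (-1);
        }
        // Assumed counterpart of send_fd(); receives the passed descriptor.
        const int passed_fd = isc::util::io::recv_fd(sock);
        close(sock);
        return (passed_fd);
    }
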
diff --git a/src/lib/statistics/Makefile.am b/src/lib/statistics/Makefile.am
new file mode 100644
index 0000000..6c7b910
--- /dev/null
+++ b/src/lib/statistics/Makefile.am
@@ -0,0 +1,24 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES) $(MULTITHREADING_FLAG)
+AM_CPPFLAGS += -I$(top_srcdir)/src/lib/statistics -I$(top_builddir)/src/lib/statistics
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
+if USE_CLANGPP
+# clang++ complains about unused function parameters in some boost header
+# files.
+AM_CXXFLAGS += -Wno-unused-parameter
+endif
+
+lib_LTLIBRARIES = libstatistics.la
+libstatistics_la_SOURCES = counter.h counter.cc
+libstatistics_la_SOURCES += counter_dict.h counter_dict.cc
+
+CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/statistics/counter.cc b/src/lib/statistics/counter.cc
new file mode 100644
index 0000000..53dc58e
--- /dev/null
+++ b/src/lib/statistics/counter.cc
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <vector>
+
+#include <boost/noncopyable.hpp>
+
+#include <statistics/counter.h>
+
+namespace {
+const unsigned int InitialValue = 0;
+} // namespace
+
+namespace isc {
+namespace statistics {
+
+class CounterImpl : boost::noncopyable {
+ private:
+ std::vector<Counter::Value> counters_;
+ public:
+ CounterImpl(const size_t nelements);
+ ~CounterImpl();
+ void inc(const Counter::Type&);
+ const Counter::Value& get(const Counter::Type&) const;
+};
+
+CounterImpl::CounterImpl(const size_t items) :
+ counters_(items, InitialValue)
+{
+ if (items == 0) {
+ isc_throw(isc::InvalidParameter, "Items must not be 0");
+ }
+}
+
+CounterImpl::~CounterImpl() {}
+
+void
+CounterImpl::inc(const Counter::Type& type) {
+ if(type >= counters_.size()) {
+ isc_throw(isc::OutOfRange, "Counter type is out of range");
+ }
+ ++counters_.at(type);
+ return;
+}
+
+const Counter::Value&
+CounterImpl::get(const Counter::Type& type) const {
+ if(type >= counters_.size()) {
+ isc_throw(isc::OutOfRange, "Counter type is out of range");
+ }
+ return (counters_.at(type));
+}
+
+Counter::Counter(const size_t items) : impl_(new CounterImpl(items))
+{}
+
+Counter::~Counter() {}
+
+void
+Counter::inc(const Type& type) {
+ impl_->inc(type);
+ return;
+}
+
+const Counter::Value&
+Counter::get(const Type& type) const {
+ return (impl_->get(type));
+}
+
+} // namespace statistics
+} // namespace isc
diff --git a/src/lib/statistics/counter.h b/src/lib/statistics/counter.h
new file mode 100644
index 0000000..9e467ce
--- /dev/null
+++ b/src/lib/statistics/counter.h
@@ -0,0 +1,69 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __COUNTER_H
+#define __COUNTER_H 1
+
+#include <boost/noncopyable.hpp>
+#include <boost/scoped_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+namespace isc {
+namespace statistics {
+
+// forward declaration for pImpl idiom
+class CounterImpl;
+
+class Counter : boost::noncopyable {
+private:
+ boost::scoped_ptr<CounterImpl> impl_;
+public:
+ typedef unsigned int Type;
+ typedef unsigned int Value;
+
+ /// The constructor.
+ ///
+ /// This constructor is mostly exception free. But it may still throw
+ /// a standard exception if memory allocation fails inside the method.
+ ///
+ /// \param items The number of counter items to hold (must be greater than 0)
+ ///
+ /// \throw isc::InvalidParameter \a items is 0
+ Counter(const size_t items);
+
+ /// The destructor.
+ ///
+ /// This method never throws an exception.
+ ~Counter();
+
+ /// \brief Increment a counter item specified with \a type.
+ ///
+ /// \param type %Counter item to increment
+ ///
+ /// \throw isc::OutOfRange \a type is invalid
+ void inc(const Type& type);
+
+ /// \brief Get the value of a counter item specified with \a type.
+ ///
+ /// \param type %Counter item to get the value of
+ ///
+ /// \throw isc::OutOfRange \a type is invalid
+ const Value& get(const Type& type) const;
+};
+
+} // namespace statistics
+} // namespace isc
+
+#endif
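
As a quick illustration of the Counter interface declared above, here is a
minimal, hypothetical usage sketch; the ExampleItems enum and the values are
made up for the example, following the pattern used by the unit tests later
in this change.

    #include <statistics/counter.h>
    #include <iostream>

    // Hypothetical counter items for the sketch.
    enum ExampleItems {
        QUERIES = 0,
        RESPONSES = 1,
        EXAMPLE_ITEM_COUNT = 2
    };

    int main() {
        isc::statistics::Counter counter(EXAMPLE_ITEM_COUNT);
        counter.inc(QUERIES);                 // each item starts at 0
        counter.inc(RESPONSES);
        counter.inc(RESPONSES);
        std::cout << counter.get(QUERIES) << " "
                  << counter.get(RESPONSES) << std::endl;   // prints "1 2"
        // counter.inc(EXAMPLE_ITEM_COUNT) would throw isc::OutOfRange.
        return (0);
    }
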
diff --git a/src/lib/statistics/counter_dict.cc b/src/lib/statistics/counter_dict.cc
new file mode 100644
index 0000000..55353b2
--- /dev/null
+++ b/src/lib/statistics/counter_dict.cc
@@ -0,0 +1,265 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <cassert>
+#include <stdexcept>
+#include <iterator>
+#include <map>
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <statistics/counter_dict.h>
+
+namespace {
+typedef boost::shared_ptr<isc::statistics::Counter> CounterPtr;
+typedef std::map<std::string, CounterPtr> DictionaryMap;
+}
+
+namespace isc {
+namespace statistics {
+
+// Implementation detail class for CounterDictionary::ConstIterator
+class CounterDictionaryConstIteratorImpl;
+
+class CounterDictionaryImpl : boost::noncopyable {
+private:
+ DictionaryMap dictionary_;
+ std::vector<std::string> elements_;
+ const size_t items_;
+ // Default constructor is forbidden; number of counter items must be
+ // specified at the construction of this class.
+ CounterDictionaryImpl();
+public:
+ CounterDictionaryImpl(const size_t items);
+ ~CounterDictionaryImpl();
+ void addElement(const std::string& name);
+ void deleteElement(const std::string& name);
+ Counter& getElement(const std::string& name);
+public:
+ CounterDictionaryConstIteratorImpl begin() const;
+ CounterDictionaryConstIteratorImpl end() const;
+};
+
+// Constructor with number of items
+CounterDictionaryImpl::CounterDictionaryImpl(const size_t items) :
+ items_(items)
+{
+ // The number of items must not be 0
+ if (items == 0) {
+ isc_throw(isc::InvalidParameter, "Items must not be 0");
+ }
+}
+
+// Destructor
+CounterDictionaryImpl::~CounterDictionaryImpl() {}
+
+void
+CounterDictionaryImpl::addElement(const std::string& name) {
+ // throw if the element already exists
+ if (dictionary_.count(name) != 0) {
+ isc_throw(isc::InvalidParameter,
+ "Element " << name << " already exists");
+ }
+ assert(items_ != 0);
+ // Create a new Counter and add to the map
+ dictionary_.insert(
+ DictionaryMap::value_type(name, CounterPtr(new Counter(items_))));
+}
+
+void
+CounterDictionaryImpl::deleteElement(const std::string& name) {
+ size_t result = dictionary_.erase(name);
+ if (result != 1) {
+ // If an element with specified name does not exist, throw
+ // isc::OutOfRange.
+ isc_throw(isc::OutOfRange, "Element " << name << " does not exist");
+ }
+}
+
+Counter&
+CounterDictionaryImpl::getElement(const std::string& name) {
+ DictionaryMap::const_iterator i = dictionary_.find(name);
+ if (i != dictionary_.end()) {
+ // the key was found. return the element.
+ return (*(i->second));
+ } else {
+ // If an element with specified name does not exist, throw
+ // isc::OutOfRange.
+ isc_throw(isc::OutOfRange, "Element " << name << " does not exist");
+ }
+}
+
+// Constructor
+// Initialize impl_
+CounterDictionary::CounterDictionary(const size_t items) :
+ impl_(new CounterDictionaryImpl(items))
+{}
+
+// Destructor
+// impl_ will be freed automatically with scoped_ptr
+CounterDictionary::~CounterDictionary() {}
+
+void
+CounterDictionary::addElement(const std::string& name) {
+ impl_->addElement(name);
+}
+
+void
+CounterDictionary::deleteElement(const std::string& name) {
+ impl_->deleteElement(name);
+}
+
+Counter&
+CounterDictionary::getElement(const std::string& name) const {
+ return (impl_->getElement(name));
+}
+
+Counter&
+CounterDictionary::operator[](const std::string& name) const {
+ return (impl_->getElement(name));
+}
+
+// Implementation detail class for CounterDictionary::ConstIterator
+class CounterDictionaryConstIteratorImpl {
+ public:
+ CounterDictionaryConstIteratorImpl();
+ ~CounterDictionaryConstIteratorImpl();
+ CounterDictionaryConstIteratorImpl(
+ const CounterDictionaryConstIteratorImpl &other);
+ CounterDictionaryConstIteratorImpl &operator=(
+ const CounterDictionaryConstIteratorImpl &source);
+ CounterDictionaryConstIteratorImpl(
+ DictionaryMap::const_iterator iterator);
+ public:
+ void increment();
+ const CounterDictionary::ConstIterator::value_type&
+ dereference() const;
+ bool equal(const CounterDictionaryConstIteratorImpl& other) const;
+ private:
+ DictionaryMap::const_iterator iterator_;
+};
+
+CounterDictionaryConstIteratorImpl::CounterDictionaryConstIteratorImpl() {}
+
+CounterDictionaryConstIteratorImpl::~CounterDictionaryConstIteratorImpl() {}
+
+// Copy constructor: deep copy of iterator_
+CounterDictionaryConstIteratorImpl::CounterDictionaryConstIteratorImpl(
+ const CounterDictionaryConstIteratorImpl &other) :
+ iterator_(other.iterator_)
+{}
+
+// Assignment operator: deep copy of iterator_
+CounterDictionaryConstIteratorImpl &
+CounterDictionaryConstIteratorImpl::operator=(
+ const CounterDictionaryConstIteratorImpl &source)
+{
+ iterator_ = source.iterator_;
+ return (*this);
+}
+
+// Constructor from implementation detail DictionaryMap::const_iterator
+CounterDictionaryConstIteratorImpl::CounterDictionaryConstIteratorImpl(
+ DictionaryMap::const_iterator iterator) :
+ iterator_(iterator)
+{}
+
+CounterDictionaryConstIteratorImpl
+CounterDictionaryImpl::begin() const {
+ return (CounterDictionaryConstIteratorImpl(dictionary_.begin()));
+}
+
+CounterDictionaryConstIteratorImpl
+CounterDictionaryImpl::end() const {
+ return (CounterDictionaryConstIteratorImpl(dictionary_.end()));
+}
+
+void
+CounterDictionaryConstIteratorImpl::increment() {
+ ++iterator_;
+ return;
+}
+
+const CounterDictionary::ConstIterator::value_type&
+CounterDictionaryConstIteratorImpl::dereference() const {
+ return (iterator_->first);
+}
+
+bool
+CounterDictionaryConstIteratorImpl::equal(
+ const CounterDictionaryConstIteratorImpl& other) const
+{
+ return (iterator_ == other.iterator_);
+}
+
+CounterDictionary::ConstIterator
+CounterDictionary::begin() const {
+ return (CounterDictionary::ConstIterator(
+ CounterDictionaryConstIteratorImpl(impl_->begin())));
+}
+
+CounterDictionary::ConstIterator
+CounterDictionary::end() const {
+ return (CounterDictionary::ConstIterator(
+ CounterDictionaryConstIteratorImpl(impl_->end())));
+}
+
+CounterDictionary::ConstIterator::ConstIterator() :
+ impl_(new CounterDictionaryConstIteratorImpl())
+{}
+
+CounterDictionary::ConstIterator::~ConstIterator() {}
+
+// Copy constructor: deep copy of impl_
+CounterDictionary::ConstIterator::ConstIterator(
+ const CounterDictionary::ConstIterator& source) :
+ impl_(new CounterDictionaryConstIteratorImpl(*(source.impl_)))
+{}
+
+// Assignment operator: deep copy of impl_
+CounterDictionary::ConstIterator &
+CounterDictionary::ConstIterator::operator=(
+ const CounterDictionary::ConstIterator &source)
+{
+ *impl_ = *source.impl_;
+ return (*this);
+}
+
+// The constructor from implementation detail
+CounterDictionary::ConstIterator::ConstIterator(
+ const CounterDictionaryConstIteratorImpl& source) :
+ impl_(new CounterDictionaryConstIteratorImpl(source))
+{}
+
+const CounterDictionary::ConstIterator::value_type&
+CounterDictionary::ConstIterator::dereference() const
+{
+ return (impl_->dereference());
+}
+
+bool
+CounterDictionary::ConstIterator::equal(
+ CounterDictionary::ConstIterator const& other) const
+{
+ return (impl_->equal(*(other.impl_)));
+}
+
+void
+CounterDictionary::ConstIterator::increment() {
+ impl_->increment();
+ return;
+}
+
+} // namespace statistics
+} // namespace isc
diff --git a/src/lib/statistics/counter_dict.h b/src/lib/statistics/counter_dict.h
new file mode 100644
index 0000000..e322119
--- /dev/null
+++ b/src/lib/statistics/counter_dict.h
@@ -0,0 +1,159 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __COUNTER_DICT_H
+#define __COUNTER_DICT_H 1
+
+#include <string>
+#include <vector>
+#include <utility>
+#include <boost/noncopyable.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <boost/iterator/iterator_facade.hpp>
+
+#include <exceptions/exceptions.h>
+#include <statistics/counter.h>
+
+namespace isc {
+namespace statistics {
+
+class CounterDictionaryImpl;
+class CounterDictionaryConstIteratorImpl;
+
+class CounterDictionary : boost::noncopyable {
+private:
+ boost::scoped_ptr<CounterDictionaryImpl> impl_;
+ // Default constructor is forbidden; number of counter items must be
+ // specified at the construction of this class.
+ CounterDictionary();
+public:
+ /// The constructor.
+ /// This constructor is mostly exception free. But it may still throw
+ /// a standard exception if memory allocation fails inside the method.
+ ///
+ /// \param items The number of counter items to hold (must be greater than 0)
+ ///
+ /// \throw isc::InvalidParameter \a items is 0
+ CounterDictionary(const size_t items);
+
+ /// The destructor.
+ ///
+ /// This method never throws an exception.
+ ~CounterDictionary();
+
+ /// \brief Add an element
+ ///
+ /// \throw isc::InvalidParameter \a element already exists.
+ ///
+ /// \param name The name of the element to add
+ void addElement(const std::string& name);
+
+ /// \brief Delete
+ ///
+ /// \throw isc::OutOfRange \a element does not exist.
+ ///
+ /// \param name The name of the element to delete
+ void deleteElement(const std::string& name);
+
+ /// \brief Lookup
+ ///
+ /// \throw isc::OutOfRange \a element does not exist.
+ ///
+ /// \param name The name of the element to get the counters of
+ Counter& getElement(const std::string &name) const;
+
+ /// Same as getElement()
+ Counter& operator[](const std::string &name) const;
+
+ /// \brief \c ConstIterator is a constant iterator that provides an
+ /// interface for enumerating the names of zones stored in CounterDictionary.
+ ///
+ /// This class is derived from boost::iterator_facade and uses the pImpl
+ /// idiom so as not to expose the implementation detail of
+ /// CounterDictionary::iterator.
+ ///
+ /// It is intended to walk through the elements when sending the
+ /// counters to the statistics module.
+ class ConstIterator :
+ public boost::iterator_facade<ConstIterator,
+ const std::string,
+ boost::forward_traversal_tag>
+ {
+ private:
+ boost::scoped_ptr<CounterDictionaryConstIteratorImpl> impl_;
+ public:
+ /// The constructor.
+ ///
+ /// This constructor is mostly exception free. But it may still
+ /// throw a standard exception if memory allocation fails
+ /// inside the method.
+ ConstIterator();
+ /// The destructor.
+ ///
+ /// This method never throws an exception.
+ ~ConstIterator();
+ /// The assignment operator.
+ ///
+ /// This method is mostly exception free. But it may still
+ /// throw a standard exception if memory allocation fails
+ /// inside the method.
+ ConstIterator& operator=(const ConstIterator &source);
+ /// The copy constructor.
+ ///
+ /// This constructor is mostly exception free. But it may still
+ /// throw a standard exception if memory allocation fails
+ /// inside the method.
+ ConstIterator(const ConstIterator& source);
+ /// The constructor from implementation detail.
+ ///
+ /// This method is used to create an instance of ConstIterator
+ /// by CounterDictionary::begin() and CounterDictionary::end().
+ ///
+ /// This constructor is mostly exception free. But it may still
+ /// throw a standard exception if memory allocation fails
+ /// inside the method.
+ ConstIterator(
+ const CounterDictionaryConstIteratorImpl& source);
+ private:
+ /// \brief An internal method to increment this iterator.
+ void increment();
+ /// \brief An internal method to check equality.
+ bool equal(const ConstIterator& other) const;
+ /// \brief An internal method to dereference this iterator.
+ const value_type& dereference() const;
+ private:
+ friend class boost::iterator_core_access;
+ };
+
+ typedef ConstIterator const_iterator;
+
+ /// \brief Return an iterator corresponding to the beginning of the
+ /// elements stored in CounterDictionary.
+ ///
+ /// This method is mostly exception free. But it may still throw a
+ /// standard exception if memory allocation fails inside the method.
+ const_iterator begin() const;
+
+ /// \brief Return an iterator corresponding to the end of the elements
+ /// stored in CounterDictionary.
+ ///
+ /// This method is mostly exception free. But it may still throw a
+ /// standard exception if memory allocation fails inside the method.
+ const_iterator end() const;
+};
+
+} // namespace statistics
+} // namespace isc
+
+#endif
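
Similarly, a minimal, hypothetical sketch of how CounterDictionary and its
ConstIterator declared above are meant to be used; the element names
("example.org", "example.com") and the ExampleItems enum are made up for the
example.

    #include <statistics/counter_dict.h>
    #include <iostream>

    // Hypothetical counter items for the sketch.
    enum ExampleItems {
        QUERIES = 0,
        RESPONSES = 1,
        EXAMPLE_ITEM_COUNT = 2
    };

    int main() {
        isc::statistics::CounterDictionary counters(EXAMPLE_ITEM_COUNT);
        counters.addElement("example.org");
        counters.addElement("example.com");
        counters["example.org"].inc(QUERIES);        // operator[] == getElement()
        counters.getElement("example.com").inc(RESPONSES);

        // Enumerate the element names with the const iterator.
        for (isc::statistics::CounterDictionary::ConstIterator
                 i = counters.begin(); i != counters.end(); ++i) {
            std::cout << *i << ": " << counters[*i].get(QUERIES) << std::endl;
        }

        counters.deleteElement("example.com");   // throws isc::OutOfRange if absent
        return (0);
    }
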
diff --git a/src/lib/statistics/tests/Makefile.am b/src/lib/statistics/tests/Makefile.am
new file mode 100644
index 0000000..c6e7cb8
--- /dev/null
+++ b/src/lib/statistics/tests/Makefile.am
@@ -0,0 +1,50 @@
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS_ENVIRONMENT = \
+ $(LIBTOOL) --mode=execute $(VALGRIND_COMMAND)
+
+TESTS =
+if HAVE_GTEST
+TESTS += run_unittests
+run_unittests_SOURCES = run_unittests.cc
+run_unittests_SOURCES += counter_unittest.cc
+run_unittests_SOURCES += counter_dict_unittest.cc
+
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
+# B10_CXXFLAGS)
+run_unittests_CXXFLAGS = $(AM_CXXFLAGS)
+if USE_GXX
+run_unittests_CXXFLAGS += -Wno-unused-parameter
+endif
+if USE_CLANGPP
+# Same for clang++, but we need to turn off -Werror completely.
+run_unittests_CXXFLAGS += -Wno-error
+endif
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/statistics/tests/counter_dict_unittest.cc b/src/lib/statistics/tests/counter_dict_unittest.cc
new file mode 100644
index 0000000..2578b46
--- /dev/null
+++ b/src/lib/statistics/tests/counter_dict_unittest.cc
@@ -0,0 +1,174 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <gtest/gtest.h>
+
+#include <set>
+
+#include <boost/foreach.hpp>
+
+#include <statistics/counter_dict.h>
+
+enum CounterItems {
+ ITEM1 = 0,
+ ITEM2 = 1,
+ ITEM3 = 2,
+ NUMBER_OF_ITEMS = 3
+};
+
+using namespace isc::statistics;
+
+TEST(CounterDictionaryCreateTest, invalidCounterSize) {
+ // Creating a counter dictionary with 0 items will cause an
+ // isc::InvalidParameter exception
+ EXPECT_THROW(CounterDictionary counters(0), isc::InvalidParameter);
+}
+
+// This fixture is for testing CounterDictionary.
+class CounterDictionaryTest : public ::testing::Test {
+protected:
+ CounterDictionaryTest() : counters(NUMBER_OF_ITEMS) {
+ counters.addElement("test");
+ counters.addElement("sub.test");
+ }
+ ~CounterDictionaryTest() {}
+
+ CounterDictionary counters;
+};
+
+TEST_F(CounterDictionaryTest, initializeCheck) {
+ // Check that all the counters are initialized to 0
+ EXPECT_EQ(counters["test"].get(ITEM1), 0);
+ EXPECT_EQ(counters["test"].get(ITEM2), 0);
+ EXPECT_EQ(counters["test"].get(ITEM3), 0);
+}
+
+TEST_F(CounterDictionaryTest, getElement) {
+ // Another member function to get counters for the element
+ EXPECT_EQ(counters.getElement("test").get(ITEM1), 0);
+ EXPECT_EQ(counters.getElement("test").get(ITEM2), 0);
+ EXPECT_EQ(counters.getElement("test").get(ITEM3), 0);
+}
+
+TEST_F(CounterDictionaryTest, incrementCounterItem) {
+ // Increment counters
+ counters["test"].inc(ITEM1);
+ counters["test"].inc(ITEM2);
+ counters["test"].inc(ITEM2);
+ counters["test"].inc(ITEM3);
+ counters["test"].inc(ITEM3);
+ counters["test"].inc(ITEM3);
+ // Check if the counters have expected values
+ EXPECT_EQ(counters["test"].get(ITEM1), 1);
+ EXPECT_EQ(counters["test"].get(ITEM2), 2);
+ EXPECT_EQ(counters["test"].get(ITEM3), 3);
+ EXPECT_EQ(counters["sub.test"].get(ITEM1), 0);
+ EXPECT_EQ(counters["sub.test"].get(ITEM2), 0);
+ EXPECT_EQ(counters["sub.test"].get(ITEM3), 0);
+}
+
+TEST_F(CounterDictionaryTest, deleteElement) {
+ // Ensure the element is accessible
+ EXPECT_EQ(counters["test"].get(ITEM1), 0);
+ EXPECT_EQ(counters["test"].get(ITEM2), 0);
+ EXPECT_EQ(counters["test"].get(ITEM3), 0);
+ // Delete the element
+ counters.deleteElement("test");
+ // Accessing the deleted element will cause an isc::OutOfRange exception
+ EXPECT_THROW(counters["test"].get(ITEM1), isc::OutOfRange);
+ // Deleting an element which does not exist will cause an isc::OutOfRange
+ // exception
+ EXPECT_THROW(counters.deleteElement("test"), isc::OutOfRange);
+}
+
+TEST_F(CounterDictionaryTest, invalidCounterItem) {
+ // Incrementing an out-of-bounds counter item will cause an isc::OutOfRange
+ // exception
+ EXPECT_THROW(counters["test"].inc(NUMBER_OF_ITEMS), isc::OutOfRange);
+}
+
+TEST_F(CounterDictionaryTest, uniquenessCheck) {
+ // Adding an element which already exists will cause an
+ // isc::InvalidParameter exception
+ EXPECT_THROW(counters.addElement("test"), isc::InvalidParameter);
+}
+
+TEST_F(CounterDictionaryTest, iteratorTest) {
+ // Increment counters
+ counters["test"].inc(ITEM1);
+ counters["sub.test"].inc(ITEM2);
+ counters["sub.test"].inc(ITEM2);
+
+ // Boolean flags to check that all of the elements can be accessed through
+ // the iterator
+ bool element_test_visited = false;
+ bool element_sub_test_visited = false;
+ // Walk through the elements with the iterator.
+ // Check that the elements "test" and "sub.test" each appear only once
+ // and that the counters have the expected values
+ for (CounterDictionary::ConstIterator i = counters.begin(),
+ e = counters.end();
+ i != e;
+ ++i
+ )
+ {
+ const std::string& zone = *i;
+ if (zone == "test" && element_test_visited == false) {
+ element_test_visited = true;
+ // Check if the counters have expected value
+ EXPECT_EQ(counters[zone].get(ITEM1), 1);
+ EXPECT_EQ(counters[zone].get(ITEM2), 0);
+ } else if (zone == "sub.test" &&
+ element_sub_test_visited == false) {
+ element_sub_test_visited = true;
+ // Check if the counters have expected value
+ EXPECT_EQ(counters[zone].get(ITEM1), 0);
+ EXPECT_EQ(counters[zone].get(ITEM2), 2);
+ } else {
+ // The test fails if it reaches here: either the element is not
+ // expected or it appeared twice
+ FAIL() << "Unexpected iterator value";
+ }
+ }
+ // Check that both "test" and "sub.test" were visited
+ EXPECT_TRUE(element_test_visited);
+ EXPECT_TRUE(element_sub_test_visited);
+}
+
+TEST_F(CounterDictionaryTest, iteratorCopyTest) {
+ // Increment counters
+ counters["test"].inc(ITEM1);
+ counters["sub.test"].inc(ITEM2);
+ counters["sub.test"].inc(ITEM2);
+
+ CounterDictionary::ConstIterator i1 = counters.begin();
+ CounterDictionary::ConstIterator i2(i1);
+ CounterDictionary::ConstIterator i3;
+ i3 = i1;
+
+ EXPECT_TRUE(i1 == i2);
+ EXPECT_TRUE(i1 == i3);
+ EXPECT_TRUE(i2 == i3);
+
+ ++i2;
+ EXPECT_TRUE(i1 != i2);
+ EXPECT_TRUE(i1 == i3);
+ EXPECT_TRUE(i2 != i3);
+
+ ++i3;
+ EXPECT_TRUE(i1 != i2);
+ EXPECT_TRUE(i1 != i3);
+ EXPECT_TRUE(i2 == i3);
+}
diff --git a/src/lib/statistics/tests/counter_unittest.cc b/src/lib/statistics/tests/counter_unittest.cc
new file mode 100644
index 0000000..e0d29ac
--- /dev/null
+++ b/src/lib/statistics/tests/counter_unittest.cc
@@ -0,0 +1,85 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <gtest/gtest.h>
+
+#include <statistics/counter.h>
+
+namespace {
+enum CounterItems {
+ ITEM1 = 0,
+ ITEM2 = 1,
+ ITEM3 = 2,
+ NUMBER_OF_ITEMS = 3
+};
+}
+
+using namespace isc::statistics;
+
+TEST(CounterCreateTest, invalidCounterSize) {
+ // Creating a counter with 0 items will cause an isc::InvalidParameter
+ // exception
+ EXPECT_THROW(Counter counter(0), isc::InvalidParameter);
+}
+
+// This fixture is for testing Counter.
+class CounterTest : public ::testing::Test {
+protected:
+ CounterTest() : counter(NUMBER_OF_ITEMS) {}
+ ~CounterTest() {}
+
+ Counter counter;
+};
+
+TEST_F(CounterTest, createCounter) {
+ // Check that all the counters are initialized to 0
+ EXPECT_EQ(counter.get(ITEM1), 0);
+ EXPECT_EQ(counter.get(ITEM2), 0);
+ EXPECT_EQ(counter.get(ITEM3), 0);
+}
+
+TEST_F(CounterTest, incrementCounterItem) {
+ // Increment counters
+ counter.inc(ITEM1);
+ counter.inc(ITEM2);
+ counter.inc(ITEM2);
+ counter.inc(ITEM3);
+ counter.inc(ITEM3);
+ counter.inc(ITEM3);
+ // Check if the counters have expected values
+ EXPECT_EQ(counter.get(ITEM1), 1);
+ EXPECT_EQ(counter.get(ITEM2), 2);
+ EXPECT_EQ(counter.get(ITEM3), 3);
+ // Increment counters once more
+ counter.inc(ITEM1);
+ counter.inc(ITEM2);
+ counter.inc(ITEM2);
+ counter.inc(ITEM3);
+ counter.inc(ITEM3);
+ counter.inc(ITEM3);
+ // Check if the counters have expected values
+ EXPECT_EQ(counter.get(ITEM1), 2);
+ EXPECT_EQ(counter.get(ITEM2), 4);
+ EXPECT_EQ(counter.get(ITEM3), 6);
+}
+
+TEST_F(CounterTest, invalidCounterItem) {
+ // Incrementing an out-of-bounds counter item will cause an isc::OutOfRange
+ // exception
+ EXPECT_THROW(counter.inc(NUMBER_OF_ITEMS), isc::OutOfRange);
+ // Trying to get an out-of-bounds counter item will cause an isc::OutOfRange
+ // exception
+ EXPECT_THROW(counter.get(NUMBER_OF_ITEMS), isc::OutOfRange);
+}
diff --git a/src/lib/statistics/tests/run_unittests.cc b/src/lib/statistics/tests/run_unittests.cc
new file mode 100644
index 0000000..38a299e
--- /dev/null
+++ b/src/lib/statistics/tests/run_unittests.cc
@@ -0,0 +1,25 @@
+// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
+#include <log/logger_support.h>
+
+int
+main(int argc, char* argv[])
+{
+ ::testing::InitGoogleTest(&argc, argv); // Initialize Google test
+ isc::log::initLogger();
+ return (isc::util::unittests::run_all());
+}
diff --git a/src/lib/util/io/PARTIAL_PORT_ON_WINDOWS b/src/lib/util/io/PARTIAL_PORT_ON_WINDOWS
new file mode 100644
index 0000000..e69de29