BIND 10 trac826, updated. 84a384a93a4d124676ce7595e364e17b2fab2a29 [trac826] host and resolver
BIND 10 source code commits
bind10-changes at lists.isc.org
Mon Oct 3 14:09:36 UTC 2011
The branch, trac826 has been updated
via 84a384a93a4d124676ce7595e364e17b2fab2a29 (commit)
from dc4d318c6fffd6c57ba7be8cfde1175230fd3173 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 84a384a93a4d124676ce7595e364e17b2fab2a29
Author: Francis Dupont <fdupont at isc.org>
Date: Mon Oct 3 16:07:58 2011 +0200
[trac826] host and resolver
global src/bin update
b10-host (checked)
b10-resolver (including tests)
-----------------------------------------------------------------------
Summary of changes:
src/bin/Makefile.am | 2 +-
src/bin/auth/Makefile.am | 17 +-
src/bin/auth/auth.spec.pre.in | 18 +
src/bin/auth/auth_config.cc | 31 +-
src/bin/auth/auth_log.cc | 26 +
src/bin/auth/auth_log.h | 54 +
src/bin/auth/auth_messages.mes | 260 +++++
src/bin/auth/auth_srv.cc | 269 +++---
src/bin/auth/auth_srv.h | 58 +-
src/bin/auth/b10-auth.8 | 47 +-
src/bin/auth/b10-auth.xml | 48 +-
src/bin/auth/benchmarks/Makefile.am | 5 +
src/bin/auth/command.cc | 58 +-
src/bin/auth/main.cc | 67 +-
src/bin/auth/query.cc | 109 ++-
src/bin/auth/query.h | 57 +-
src/bin/auth/statistics.cc | 39 +-
src/bin/auth/statistics.h | 7 +-
src/bin/auth/tests/Makefile.am | 6 +
src/bin/auth/tests/auth_srv_unittest.cc | 157 +++-
src/bin/auth/tests/command_unittest.cc | 65 +-
src/bin/auth/tests/config_unittest.cc | 53 +-
src/bin/auth/tests/query_unittest.cc | 222 +++--
src/bin/auth/tests/run_unittests.cc | 5 +-
src/bin/auth/tests/statistics_unittest.cc | 3 +-
src/bin/auth/tests/testdata/Makefile.am | 2 +-
src/bin/bind10/Makefile.am | 24 +-
src/bin/bind10/bind10.8 | 16 +-
src/bin/bind10/bind10.py.in | 1039 -------------------
src/bin/bind10/bind10.xml | 28 +-
src/bin/bind10/bind10_messages.mes | 200 ++++
src/bin/bind10/bind10_src.py.in | 1078 ++++++++++++++++++++
src/bin/bind10/bob.spec | 11 +
src/bin/bind10/creatorapi.txt | 123 +++
src/bin/bind10/run_bind10.sh.in | 6 +-
src/bin/bind10/tests/Makefile.am | 13 +-
src/bin/bind10/tests/bind10_test.py.in | 85 ++-
src/bin/bindctl/Makefile.am | 7 +
src/bin/bindctl/bindcmd.py | 26 +-
src/bin/bindctl/bindctl_main.py.in | 19 +-
src/bin/bindctl/run_bindctl.sh.in | 10 +-
src/bin/bindctl/tests/Makefile.am | 10 +-
src/bin/cfgmgr/Makefile.am | 5 +
src/bin/cfgmgr/b10-cfgmgr.py.in | 35 +-
src/bin/cfgmgr/plugins/Makefile.am | 13 +-
src/bin/cfgmgr/plugins/b10logging.py | 109 ++
src/bin/cfgmgr/plugins/logging.spec | 81 ++
src/bin/cfgmgr/plugins/tests/Makefile.am | 27 +
src/bin/cfgmgr/plugins/tests/logging_test.py | 135 +++
src/bin/cfgmgr/plugins/tests/tsig_keys_test.py | 103 ++
src/bin/cfgmgr/plugins/tsig_keys.py | 50 +
src/bin/cfgmgr/plugins/tsig_keys.spec | 21 +
src/bin/cfgmgr/tests/Makefile.am | 21 +-
src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in | 3 +
src/bin/cmdctl/Makefile.am | 25 +-
src/bin/cmdctl/cmdctl.py.in | 56 +-
src/bin/cmdctl/cmdctl_messages.mes | 81 ++
src/bin/cmdctl/run_b10-cmdctl.sh.in | 10 +-
src/bin/cmdctl/tests/Makefile.am | 10 +-
src/bin/cmdctl/tests/cmdctl_test.py | 6 +-
src/bin/host/Makefile.am | 32 +-
src/bin/host/README | 16 +-
src/bin/host/b10-host.1 | 118 +++
src/bin/host/b10-host.xml | 196 ++++
src/bin/host/host.cc | 111 ++-
src/bin/loadzone/Makefile.am | 1 +
src/bin/loadzone/run_loadzone.sh.in | 10 +-
src/bin/loadzone/tests/correct/Makefile.am | 11 +-
src/bin/loadzone/tests/correct/correct_test.sh.in | 2 +-
src/bin/loadzone/tests/error/Makefile.am | 11 +-
src/bin/loadzone/tests/error/error_test.sh.in | 2 +-
src/bin/msgq/Makefile.am | 5 +
src/bin/msgq/tests/Makefile.am | 10 +-
src/bin/msgq/tests/msgq_test.py | 4 +-
src/bin/resolver/Makefile.am | 21 +-
src/bin/resolver/b10-resolver.8 | 30 +-
src/bin/resolver/b10-resolver.xml | 32 +-
src/bin/resolver/main.cc | 64 +-
src/bin/resolver/resolver.cc | 292 ++++--
src/bin/resolver/resolver.h | 22 +
src/bin/resolver/resolver.spec.pre.in | 35 +
src/bin/resolver/resolver.spec.pre.win32 | 162 +++
src/bin/resolver/resolver_log.cc | 19 +
src/bin/resolver/resolver_log.h | 49 +
src/bin/resolver/resolver_messages.mes | 248 +++++
src/bin/resolver/spec_config.h.pre.win32 | 15 +
src/bin/resolver/tests/Makefile.am | 17 +-
src/bin/resolver/tests/resolver_config_unittest.cc | 234 ++++-
src/bin/resolver/tests/resolver_unittest.cc | 67 ++-
.../resolver/tests/response_scrubber_unittest.cc | 13 +-
src/bin/resolver/tests/run_unittests.cc | 5 +-
src/bin/sockcreator/README | 2 +-
src/bin/sockcreator/tests/Makefile.am | 5 +-
src/bin/sockcreator/tests/run_unittests.cc | 3 +-
src/bin/stats/Makefile.am | 33 +-
src/bin/stats/b10-stats-httpd.8 | 4 +
src/bin/stats/b10-stats-httpd.xml | 6 +
src/bin/stats/b10-stats.8 | 107 ++-
src/bin/stats/b10-stats.xml | 140 +++-
src/bin/stats/run_b10-stats-httpd.sh.in | 33 -
src/bin/stats/run_b10-stats.sh.in | 33 -
...{stats-httpd-xml.tpl.in => stats-httpd-xml.tpl} | 0
...{stats-httpd-xsd.tpl.in => stats-httpd-xsd.tpl} | 0
...{stats-httpd-xsl.tpl.in => stats-httpd-xsl.tpl} | 0
.../{stats-httpd.spec.in => stats-httpd.spec} | 0
src/bin/stats/stats-schema.spec | 86 ++
src/bin/stats/stats.py.in | 115 ++-
src/bin/stats/stats.spec | 106 ++
src/bin/stats/stats.spec.in | 140 ---
src/bin/stats/stats_httpd.py.in | 130 ++--
src/bin/stats/stats_httpd_messages.mes | 92 ++
src/bin/stats/stats_messages.mes | 75 ++
src/bin/stats/tests/Makefile.am | 15 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 123 ++-
src/bin/stats/tests/b10-stats_test.py | 34 +-
src/bin/stats/tests/http/Makefile.am | 4 +
src/bin/stats/tests/isc/Makefile.am | 7 +-
src/bin/stats/tests/isc/cc/Makefile.am | 5 +
src/bin/stats/tests/isc/cc/session.py | 10 +-
src/bin/stats/tests/isc/config/Makefile.am | 5 +
src/bin/stats/tests/isc/config/ccsession.py | 89 ++
src/bin/stats/tests/isc/log/Makefile.am | 7 +
.../python => bin/stats/tests}/isc/log/__init__.py | 0
src/bin/stats/tests/isc/log_messages/Makefile.am | 7 +
src/bin/stats/tests/isc/log_messages/__init__.py | 18 +
.../tests/isc/log_messages/stats_httpd_messages.py | 16 +
.../stats/tests/isc/log_messages/stats_messages.py | 16 +
src/bin/stats/tests/isc/util/Makefile.am | 5 +
src/bin/stats/tests/stats_test.in | 31 -
src/bin/tests/Makefile.am | 12 +-
src/bin/tests/process_rename_test.py.in | 9 +-
src/bin/xfrin/Makefile.am | 21 +-
src/bin/xfrin/b10-xfrin.8 | 46 +-
src/bin/xfrin/b10-xfrin.xml | 47 +-
src/bin/xfrin/tests/Makefile.am | 4 +-
src/bin/xfrin/tests/xfrin_test.py | 617 +++++++++++-
src/bin/xfrin/xfrin.py.in | 474 +++++++---
src/bin/xfrin/xfrin.spec | 50 +-
src/bin/xfrin/xfrin_messages.mes | 91 ++
src/bin/xfrout/Makefile.am | 20 +-
src/bin/xfrout/b10-xfrout.xml | 8 +
src/bin/xfrout/tests/Makefile.am | 12 +-
src/bin/xfrout/tests/xfrout_test.py.in | 508 +++++++++-
src/bin/xfrout/xfrout.py.in | 384 ++++++--
src/bin/xfrout/xfrout.spec.pre.in | 77 ++-
src/bin/xfrout/xfrout_messages.mes | 162 +++
src/bin/zonemgr/Makefile.am | 21 +-
src/bin/zonemgr/b10-zonemgr.8 | 37 +-
src/bin/zonemgr/b10-zonemgr.xml | 66 +-
src/bin/zonemgr/tests/Makefile.am | 10 +-
src/bin/zonemgr/tests/zonemgr_test.py | 198 +++-
src/bin/zonemgr/zonemgr.py.in | 181 +++--
src/bin/zonemgr/zonemgr_messages.mes | 145 +++
win32build/VS2010/b10-host/b10-host.vcxproj | 98 ++
.../VS2010/b10-host/b10-host.vcxproj.filters | 30 +
.../b10-host.vcxproj.user} | 0
.../VS2010/b10-resolver/b10-resolver.vcxproj | 121 +++
.../b10-resolver/b10-resolver.vcxproj.filters | 57 +
.../b10-resolver.vcxproj.user} | 0
.../b10-resolver_tests/b10-resolver_tests.vcxproj | 109 ++
.../b10-resolver_tests.vcxproj.filters | 63 ++
.../b10-resolver_tests.vcxproj.user} | 0
win32build/VS2010/bind10.sln | 65 ++
163 files changed, 9499 insertions(+), 2771 deletions(-)
create mode 100644 src/bin/auth/auth_log.cc
create mode 100644 src/bin/auth/auth_log.h
create mode 100644 src/bin/auth/auth_messages.mes
delete mode 100755 src/bin/bind10/bind10.py.in
create mode 100644 src/bin/bind10/bind10_messages.mes
create mode 100755 src/bin/bind10/bind10_src.py.in
create mode 100644 src/bin/bind10/creatorapi.txt
mode change 100644 => 100755 src/bin/bind10/run_bind10.sh.in
mode change 100644 => 100755 src/bin/bindctl/run_bindctl.sh.in
create mode 100644 src/bin/cfgmgr/plugins/b10logging.py
create mode 100644 src/bin/cfgmgr/plugins/logging.spec
create mode 100644 src/bin/cfgmgr/plugins/tests/Makefile.am
create mode 100644 src/bin/cfgmgr/plugins/tests/logging_test.py
create mode 100644 src/bin/cfgmgr/plugins/tests/tsig_keys_test.py
create mode 100644 src/bin/cfgmgr/plugins/tsig_keys.py
create mode 100644 src/bin/cfgmgr/plugins/tsig_keys.spec
create mode 100644 src/bin/cmdctl/cmdctl_messages.mes
create mode 100644 src/bin/host/b10-host.1
create mode 100644 src/bin/host/b10-host.xml
mode change 100644 => 100755 src/bin/loadzone/run_loadzone.sh.in
mode change 100644 => 100755 src/bin/loadzone/tests/correct/correct_test.sh.in
mode change 100644 => 100755 src/bin/loadzone/tests/error/error_test.sh.in
create mode 100644 src/bin/resolver/resolver.spec.pre.win32
create mode 100644 src/bin/resolver/resolver_log.cc
create mode 100644 src/bin/resolver/resolver_log.h
create mode 100644 src/bin/resolver/resolver_messages.mes
create mode 100644 src/bin/resolver/spec_config.h.pre.win32
delete mode 100755 src/bin/stats/run_b10-stats-httpd.sh.in
delete mode 100755 src/bin/stats/run_b10-stats.sh.in
rename src/bin/stats/{stats-httpd-xml.tpl.in => stats-httpd-xml.tpl} (100%)
rename src/bin/stats/{stats-httpd-xsd.tpl.in => stats-httpd-xsd.tpl} (100%)
rename src/bin/stats/{stats-httpd-xsl.tpl.in => stats-httpd-xsl.tpl} (100%)
rename src/bin/stats/{stats-httpd.spec.in => stats-httpd.spec} (100%)
create mode 100644 src/bin/stats/stats-schema.spec
mode change 100644 => 100755 src/bin/stats/stats.py.in
create mode 100644 src/bin/stats/stats.spec
delete mode 100644 src/bin/stats/stats.spec.in
mode change 100644 => 100755 src/bin/stats/stats_httpd.py.in
create mode 100644 src/bin/stats/stats_httpd_messages.mes
create mode 100644 src/bin/stats/stats_messages.mes
create mode 100644 src/bin/stats/tests/isc/log/Makefile.am
copy src/{lib/python => bin/stats/tests}/isc/log/__init__.py (100%)
create mode 100644 src/bin/stats/tests/isc/log_messages/Makefile.am
create mode 100644 src/bin/stats/tests/isc/log_messages/__init__.py
create mode 100644 src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
create mode 100644 src/bin/stats/tests/isc/log_messages/stats_messages.py
delete mode 100755 src/bin/stats/tests/stats_test.in
create mode 100644 src/bin/xfrin/xfrin_messages.mes
create mode 100644 src/bin/xfrout/xfrout_messages.mes
create mode 100644 src/bin/zonemgr/zonemgr_messages.mes
create mode 100755 win32build/VS2010/b10-host/b10-host.vcxproj
create mode 100755 win32build/VS2010/b10-host/b10-host.vcxproj.filters
copy win32build/VS2010/{BINDInstall/BINDInstall.vcxproj.user => b10-host/b10-host.vcxproj.user} (100%)
create mode 100755 win32build/VS2010/b10-resolver/b10-resolver.vcxproj
create mode 100755 win32build/VS2010/b10-resolver/b10-resolver.vcxproj.filters
copy win32build/VS2010/{BINDInstall/BINDInstall.vcxproj.user => b10-resolver/b10-resolver.vcxproj.user} (100%)
create mode 100755 win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj
create mode 100755 win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.filters
copy win32build/VS2010/{BINDInstall/BINDInstall.vcxproj.user => b10-resolver_tests/b10-resolver_tests.vcxproj.user} (100%)
-----------------------------------------------------------------------
diff --git a/src/bin/Makefile.am b/src/bin/Makefile.am
index 23d660c..06d8df2 100644
--- a/src/bin/Makefile.am
+++ b/src/bin/Makefile.am
@@ -1,4 +1,4 @@
SUBDIRS = bind10 bindctl cfgmgr loadzone msgq host cmdctl auth xfrin xfrout \
- usermgr zonemgr stats tests resolver sockcreator
+ usermgr zonemgr stats tests resolver sockcreator dhcp6
check-recursive: all-recursive
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 56dc348..e3128b5 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -16,7 +16,8 @@ endif
pkglibexecdir = $(libexecdir)/@PACKAGE@
-CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES += auth_messages.h auth_messages.cc
man_MANS = b10-auth.8
EXTRA_DIST = $(man_MANS) b10-auth.xml
@@ -34,24 +35,34 @@ auth.spec: auth.spec.pre
spec_config.h: spec_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
-BUILT_SOURCES = spec_config.h
+auth_messages.h auth_messages.cc: auth_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/auth/auth_messages.mes
+
+BUILT_SOURCES = spec_config.h auth_messages.h auth_messages.cc
+
pkglibexec_PROGRAMS = b10-auth
b10_auth_SOURCES = query.cc query.h
b10_auth_SOURCES += auth_srv.cc auth_srv.h
+b10_auth_SOURCES += auth_log.cc auth_log.h
b10_auth_SOURCES += change_user.cc change_user.h
b10_auth_SOURCES += auth_config.cc auth_config.h
b10_auth_SOURCES += command.cc command.h
b10_auth_SOURCES += common.h common.cc
b10_auth_SOURCES += statistics.cc statistics.h
b10_auth_SOURCES += main.cc
+
+nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
+EXTRA_DIST += auth_messages.mes
+
b10_auth_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_auth_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la
b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_auth_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
b10_auth_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-b10_auth_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+b10_auth_LDADD += $(top_builddir)/src/lib/log/liblog.la
b10_auth_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
b10_auth_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
b10_auth_LDADD += $(SQLITE_LIBS)
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index d88ffb5..2ce044e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -122,6 +122,24 @@
}
]
}
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP ",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
]
}
}
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index 7929d80..d684c68 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -60,6 +60,15 @@ private:
set<string> configured_sources_;
};
+/// A derived \c AuthConfigParser for the version value
+/// (which is not used at this moment)
+class VersionConfig : public AuthConfigParser {
+public:
+ VersionConfig() {}
+ virtual void build(ConstElementPtr) {};
+ virtual void commit() {};
+};
+
void
DatasourcesConfig::build(ConstElementPtr config_value) {
BOOST_FOREACH(ConstElementPtr datasrc_elem, config_value->listValue()) {
@@ -98,7 +107,7 @@ DatasourcesConfig::commit() {
// server implementation details, and isn't scalable wrt the number of
// data source types, and should eventually be improved.
// Currently memory data source for class IN is the only possibility.
- server_.setMemoryDataSrc(RRClass::IN(), AuthSrv::MemoryDataSrcPtr());
+ server_.setInMemoryClient(RRClass::IN(), AuthSrv::InMemoryClientPtr());
BOOST_FOREACH(shared_ptr<AuthConfigParser> datasrc_config, datasources_) {
datasrc_config->commit();
@@ -116,12 +125,12 @@ public:
{}
virtual void build(ConstElementPtr config_value);
virtual void commit() {
- server_.setMemoryDataSrc(rrclass_, memory_datasrc_);
+ server_.setInMemoryClient(rrclass_, memory_client_);
}
private:
AuthSrv& server_;
RRClass rrclass_;
- AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+ AuthSrv::InMemoryClientPtr memory_client_;
};
void
@@ -134,8 +143,8 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
// We'd eventually optimize building zones (in case of reloading) by
// selectively loading fresh zones. Right now we simply check the
// RR class is supported by the server implementation.
- server_.getMemoryDataSrc(rrclass_);
- memory_datasrc_ = AuthSrv::MemoryDataSrcPtr(new MemoryDataSrc());
+ server_.getInMemoryClient(rrclass_);
+ memory_client_ = AuthSrv::InMemoryClientPtr(new InMemoryClient());
ConstElementPtr zones_config = config_value->get("zones");
if (!zones_config) {
@@ -154,9 +163,10 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
isc_throw(AuthConfigError, "Missing zone file for zone: "
<< origin->str());
}
- shared_ptr<MemoryZone> new_zone(new MemoryZone(rrclass_,
+ shared_ptr<InMemoryZoneFinder> zone_finder(new
+ InMemoryZoneFinder(rrclass_,
Name(origin->stringValue())));
- const result::Result result = memory_datasrc_->addZone(new_zone);
+ const result::Result result = memory_client_->addZone(zone_finder);
if (result == result::EXIST) {
isc_throw(AuthConfigError, "zone "<< origin->str()
<< " already exists");
@@ -168,7 +178,7 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
* need the load method to be split into some kind of build and
* commit/abort parts.
*/
- new_zone->load(file->stringValue());
+ zone_finder->load(file->stringValue());
}
}
@@ -293,6 +303,11 @@ createAuthConfigParser(AuthSrv& server, const std::string& config_id,
// we may introduce dynamic registration of configuration parsers,
// and then this test can be done in a cleaner and safer way.
return (new ThrowerCommitConfig());
+ } else if (config_id == "version") {
+ // Currently, the version identifier is ignored, but it should
+ // later be used to mark backwards incompatible changes in the
+ // config data
+ return (new VersionConfig());
} else {
isc_throw(AuthConfigError, "Unknown configuration identifier: " <<
config_id);
diff --git a/src/bin/auth/auth_log.cc b/src/bin/auth/auth_log.cc
new file mode 100644
index 0000000..d41eaea
--- /dev/null
+++ b/src/bin/auth/auth_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the top-level component of b10-auth.
+
+#include "auth_log.h"
+
+namespace isc {
+namespace auth {
+
+isc::log::Logger auth_logger("auth");
+
+} // namespace auth
+} // namespace isc
+
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
new file mode 100644
index 0000000..5205624
--- /dev/null
+++ b/src/bin/auth/auth_log.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_LOG__H
+#define __AUTH_LOG__H
+
+#include <log/macros.h>
+#include <auth/auth_messages.h>
+
+namespace isc {
+namespace auth {
+
+/// \brief Auth Logging
+///
+/// Defines the levels used to output debug messages in the "auth" part of
+/// the b10-auth program. Higher numbers equate to more verbose (and detailed)
+/// output.
+
+// Debug messages indicating normal startup are logged at this debug level.
+const int DBG_AUTH_START = 10;
+
+// Debug level used to log setting information (such as configuration changes).
+const int DBG_AUTH_OPS = 30;
+
+// Trace detailed operations, including errors raised when processing invalid
+// packets. (These are not logged at severities of WARN or higher for fear
+// that a set of deliberately invalid packets set to the authoritative server
+// could overwhelm the logging.)
+const int DBG_AUTH_DETAIL = 50;
+
+// This level is used to log the contents of packets received and sent.
+const int DBG_AUTH_MESSAGES = 70;
+
+/// Define the logger for the "auth" module part of b10-auth. We could define
+/// a logger in each file, but we would want to define a common name to avoid
+/// spelling mistakes, so it is just one small step from there to define a
+/// module-common logger.
+extern isc::log::Logger auth_logger;
+
+} // namespace auth
+} // namespace isc
+
+#endif // __AUTH_LOG__H
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
new file mode 100644
index 0000000..9f04b76
--- /dev/null
+++ b/src/bin/auth/auth_messages.mes
@@ -0,0 +1,260 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::auth
+
+% AUTH_AXFR_ERROR error handling AXFR request: %1
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+
+% AUTH_AXFR_UDP AXFR query received over UDP
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+
+% AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+
+% AUTH_CONFIG_CHANNEL_CREATED configuration session channel created
+This is a debug message indicating that authoritative server has created
the channel to the configuration manager. It is issued during server
startup as an indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established
+This is a debug message indicating that the authoritative server
has established communication with the configuration manager over the
previously-created channel. It is issued during server startup as an
indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_STARTED configuration session channel started
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+
+% AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+
+% AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1
An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+
+% AUTH_DATA_SOURCE data source database file: %1
This is a debug message produced by the authoritative server when it accesses a
database data source, listing the file that is being accessed.
+
+% AUTH_DNS_SERVICES_CREATED DNS services created
+This is a debug message indicating that the component that will handle
incoming queries for the authoritative server (DNSServices) has been
successfully created. It is issued during server startup as an indication
that the initialization is proceeding normally.
+
+% AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+
+% AUTH_LOAD_TSIG loading TSIG keys
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_LOAD_ZONE loaded zone %1/%2
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+
+% AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+
+% AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+
+% AUTH_NO_STATS_SESSION session interface for statistics is not available
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+
+% AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+
+% AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+
+% AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+
+% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+
+% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+
+% AUTH_PACKET_RECEIVED message received:\n%1
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_PROCESS_FAIL message processing failure: %1
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+
+% AUTH_RECEIVED_COMMAND command '%1' received
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+
+% AUTH_RECEIVED_SENDSTATS command 'sendstats' received
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+
+% AUTH_RESPONSE_RECEIVED received response message, ignoring
This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+
+% AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SERVER_CREATED server created
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+
+% AUTH_SERVER_FAILED server failed: %1
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+
+% AUTH_SERVER_STARTED server started
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+
+% AUTH_SQLITE3 nothing to do for loading sqlite3
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+
+% AUTH_STATS_CHANNEL_CREATED STATS session channel created
+This is a debug message indicating that the authoritative server has
created a channel to the statistics process. It is issued during server
startup as an indication that the initialization is proceeding normally.
+
+% AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
channel. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_STATS_COMMS communication error in sending statistics data: %1
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+
+% AUTH_STATS_TIMEOUT timeout while sending statistics data: %1
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+
+% AUTH_STATS_TIMER_DISABLED statistics timer has been disabled
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+
+% AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+
+% AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+
+% AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
during server startup as an indication that the initialization is
+proceeding normally.
+
+% AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
XFRIN (Transfer-in) process. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+
+% AUTH_ZONEMGR_ERROR received error response from zone manager: %1
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+
+
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index a863ef3..5a31442 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -20,6 +20,7 @@
#include <cassert>
#include <iostream>
#include <vector>
+#include <memory>
#include <boost/bind.hpp>
@@ -43,6 +44,7 @@
#include <dns/rrset.h>
#include <dns/rrttl.h>
#include <dns/message.h>
+#include <dns/tsig.h>
#include <datasrc/query.h>
#include <datasrc/data_source.h>
@@ -57,6 +59,7 @@
#include <auth/auth_srv.h>
#include <auth/query.h>
#include <auth/statistics.h>
+#include <auth/auth_log.h>
using namespace std;
@@ -73,6 +76,7 @@ using namespace isc::xfr;
using namespace isc::asiolink;
using namespace isc::asiodns;
using namespace isc::server_common::portconfig;
+using boost::shared_ptr;
class AuthSrvImpl {
private:
@@ -85,11 +89,14 @@ public:
isc::data::ConstElementPtr setDbFile(isc::data::ConstElementPtr config);
bool processNormalQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer);
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
bool processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer);
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
bool processNotify(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer);
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
IOService io_service_;
@@ -98,12 +105,11 @@ public:
/// These members are public because AuthSrv accesses them directly.
ModuleCCSession* config_session_;
- bool verbose_mode_;
AbstractSession* xfrin_session_;
/// In-memory data source. Currently class IN only for simplicity.
- const RRClass memory_datasrc_class_;
- AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+ const RRClass memory_client_class_;
+ AuthSrv::InMemoryClientPtr memory_client_;
/// Hot spot cache
isc::datasrc::HotCache cache_;
@@ -116,6 +122,9 @@ public:
/// Addresses we listen on
AddressList listen_addresses_;
+
+ /// The TSIG keyring
+ const shared_ptr<TSIGKeyRing>* keyring_;
private:
std::string db_file_;
@@ -134,11 +143,12 @@ private:
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
AbstractXfroutClient& xfrout_client) :
- config_session_(NULL), verbose_mode_(false),
+ config_session_(NULL),
xfrin_session_(NULL),
- memory_datasrc_class_(RRClass::IN()),
+ memory_client_class_(RRClass::IN()),
statistics_timer_(io_service_),
- counters_(verbose_mode_),
+ counters_(),
+ keyring_(NULL),
xfrout_connected_(false),
xfrout_client_(xfrout_client)
{
@@ -241,7 +251,9 @@ public:
void
makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
- const Rcode& rcode, const bool verbose_mode)
+ const Rcode& rcode,
+ std::auto_ptr<TSIGContext> tsig_context =
+ std::auto_ptr<TSIGContext>())
{
// extract the parameters that should be kept.
// XXX: with the current implementation, it's not easy to set EDNS0
@@ -272,25 +284,16 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
message->setRcode(rcode);
MessageRenderer renderer(*buffer);
- message->toWire(renderer);
-
- if (verbose_mode) {
- cerr << "[b10-auth] sending an error response (" <<
- renderer.getLength() << " bytes):\n" << message->toText() << endl;
+ if (tsig_context.get() != NULL) {
+ message->toWire(renderer, *tsig_context);
+ } else {
+ message->toWire(renderer);
}
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
+ .arg(renderer.getLength()).arg(*message);
}
}
-void
-AuthSrv::setVerbose(const bool on) {
- impl_->verbose_mode_ = on;
-}
-
-bool
-AuthSrv::getVerbose() const {
- return (impl_->verbose_mode_);
-}
-
IOService&
AuthSrv::getIOService() {
return (impl_->io_service_);
@@ -326,37 +329,34 @@ AuthSrv::getConfigSession() const {
return (impl_->config_session_);
}
-AuthSrv::MemoryDataSrcPtr
-AuthSrv::getMemoryDataSrc(const RRClass& rrclass) {
+AuthSrv::InMemoryClientPtr
+AuthSrv::getInMemoryClient(const RRClass& rrclass) {
// XXX: for simplicity, we only support the IN class right now.
- if (rrclass != impl_->memory_datasrc_class_) {
+ if (rrclass != impl_->memory_client_class_) {
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
}
- return (impl_->memory_datasrc_);
+ return (impl_->memory_client_);
}
void
-AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
- MemoryDataSrcPtr memory_datasrc)
+AuthSrv::setInMemoryClient(const isc::dns::RRClass& rrclass,
+ InMemoryClientPtr memory_client)
{
// XXX: see above
- if (rrclass != impl_->memory_datasrc_class_) {
+ if (rrclass != impl_->memory_client_class_) {
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
- }
- if (impl_->verbose_mode_) {
- if (!impl_->memory_datasrc_ && memory_datasrc) {
- cerr << "[b10-auth] Memory data source is enabled for class "
- << rrclass << endl;
- } else if (impl_->memory_datasrc_ && !memory_datasrc) {
- cerr << "[b10-auth] Memory data source is disabled for class "
- << rrclass << endl;
- }
- }
- impl_->memory_datasrc_ = memory_datasrc;
+ } else if (!impl_->memory_client_ && memory_client) {
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
+ .arg(rrclass);
+ } else if (impl_->memory_client_ && !memory_client) {
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
+ .arg(rrclass);
+ }
+ impl_->memory_client_ = memory_client;
}
uint32_t
@@ -376,18 +376,13 @@ AuthSrv::setStatisticsTimerInterval(uint32_t interval) {
}
if (interval == 0) {
impl_->statistics_timer_.cancel();
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
} else {
impl_->statistics_timer_.setup(boost::bind(&AuthSrv::submitStatistics,
this),
interval * 1000);
- }
- if (impl_->verbose_mode_) {
- if (interval == 0) {
- cerr << "[b10-auth] Disabled statistics timer" << endl;
- } else {
- cerr << "[b10-auth] Set statistics timer to " << interval
- << " seconds" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
+ .arg(interval);
}
}
@@ -404,17 +399,13 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
// Ignore all responses.
if (message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] received unexpected response, ignoring"
- << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] DNS packet exception: " << ex.what() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_HEADER_PARSE_FAIL)
+ .arg(ex.what());
server->resume(false);
return;
}
@@ -423,52 +414,63 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
// Parse the message.
message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] returning " << error.getRcode().toText()
- << ": " << error.what() << endl;
- }
- makeErrorMessage(message, buffer, error.getRcode(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+ .arg(error.getRcode().toText()).arg(error.what());
+ makeErrorMessage(message, buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] returning SERVFAIL: " << ex.what() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+ .arg(ex.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL());
server->resume(true);
return;
} // other exceptions will be handled at a higher layer.
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] received a message:\n" << message->toText() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_PACKET_RECEIVED)
+ .arg(message->toText());
// Perform further protocol-level validation.
+ // TSIG first
+ // If this is set to something, we know we need to answer with TSIG as well
+ std::auto_ptr<TSIGContext> tsig_context;
+ const TSIGRecord* tsig_record(message->getTSIGRecord());
+ TSIGError tsig_error(TSIGError::NOERROR());
+
+ // Do we do TSIG?
+ // The keyring can be null if we're in test
+ if (impl_->keyring_ != NULL && tsig_record != NULL) {
+ tsig_context.reset(new TSIGContext(tsig_record->getName(),
+ tsig_record->getRdata().
+ getAlgorithm(),
+ **impl_->keyring_));
+ tsig_error = tsig_context->verify(tsig_record, io_message.getData(),
+ io_message.getDataSize());
+ }
bool sendAnswer = true;
- if (message->getOpcode() == Opcode::NOTIFY()) {
- sendAnswer = impl_->processNotify(io_message, message, buffer);
+ if (tsig_error != TSIGError::NOERROR()) {
+ makeErrorMessage(message, buffer, tsig_error.toRcode(), tsig_context);
+ } else if (message->getOpcode() == Opcode::NOTIFY()) {
+ sendAnswer = impl_->processNotify(io_message, message, buffer,
+ tsig_context);
} else if (message->getOpcode() != Opcode::QUERY()) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] unsupported opcode" << endl;
- }
- makeErrorMessage(message, buffer, Rcode::NOTIMP(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE)
+ .arg(message->getOpcode().toText());
+ makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
} else if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
- makeErrorMessage(message, buffer, Rcode::FORMERR(),
- impl_->verbose_mode_);
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
} else {
ConstQuestionPtr question = *message->beginQuestion();
const RRType &qtype = question->getType();
if (qtype == RRType::AXFR()) {
- sendAnswer = impl_->processAxfrQuery(io_message, message, buffer);
+ sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
+ tsig_context);
} else if (qtype == RRType::IXFR()) {
- makeErrorMessage(message, buffer, Rcode::NOTIMP(),
- impl_->verbose_mode_);
+ makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
} else {
- sendAnswer = impl_->processNormalQuery(io_message, message, buffer);
+ sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
+ tsig_context);
}
}
@@ -477,7 +479,8 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
bool
AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer)
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context)
{
ConstEDNSPtr remote_edns = message->getEDNS();
const bool dnssec_ok = remote_edns && remote_edns->getDNSSECAwareness();
@@ -502,20 +505,17 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
// If a memory data source is configured call the separate
// Query::process()
const ConstQuestionPtr question = *message->beginQuestion();
- if (memory_datasrc_ && memory_datasrc_class_ == question->getClass()) {
+ if (memory_client_ && memory_client_class_ == question->getClass()) {
const RRType& qtype = question->getType();
const Name& qname = question->getName();
- auth::Query(*memory_datasrc_, qname, qtype, *message).process();
+ auth::Query(*memory_client_, qname, qtype, *message).process();
} else {
datasrc::Query query(*message, cache_, dnssec_ok);
data_sources_.doQuery(query);
}
} catch (const Exception& ex) {
- if (verbose_mode_) {
- cerr << "[b10-auth] Internal error, returning SERVFAIL: " <<
- ex.what() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+ LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(ex.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL());
return (true);
}
@@ -523,29 +523,28 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
const bool udp_buffer =
(io_message.getSocket().getProtocol() == IPPROTO_UDP);
renderer.setLengthLimit(udp_buffer ? remote_bufsize : 65535);
- message->toWire(renderer);
-
- if (verbose_mode_) {
- cerr << "[b10-auth] sending a response ("
- << renderer.getLength()
- << " bytes):\n" << message->toText() << endl;
+ if (tsig_context.get() != NULL) {
+ message->toWire(renderer, *tsig_context);
+ } else {
+ message->toWire(renderer);
}
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_NORMAL_RESPONSE)
+ .arg(renderer.getLength()).arg(message->toText());
return (true);
}
bool
AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer)
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context)
{
// Increment query counter.
incCounter(io_message.getSocket().getProtocol());
if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
- if (verbose_mode_) {
- cerr << "[b10-auth] AXFR query over UDP isn't allowed" << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_UDP);
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
@@ -568,11 +567,9 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
xfrout_connected_ = false;
}
- if (verbose_mode_) {
- cerr << "[b10-auth] Error in handling XFR request: " << err.what()
- << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+ .arg(err.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL(), tsig_context);
return (true);
}
@@ -581,25 +578,22 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
bool
AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer)
+ OutputBufferPtr buffer,
+ std::auto_ptr<TSIGContext> tsig_context)
{
// The incoming notify must contain exactly one question for SOA of the
// zone name.
if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
- if (verbose_mode_) {
- cerr << "[b10-auth] invalid number of questions in notify: "
- << message->getRRCount(Message::SECTION_QUESTION) << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_QUESTIONS)
+ .arg(message->getRRCount(Message::SECTION_QUESTION));
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
ConstQuestionPtr question = *message->beginQuestion();
if (question->getType() != RRType::SOA()) {
- if (verbose_mode_) {
- cerr << "[b10-auth] invalid question RR type in notify: "
- << question->getType() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_RRTYPE)
+ .arg(question->getType().toText());
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
@@ -615,10 +609,7 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
// silent about such cases, but there doesn't seem to be anything we can
// improve at the primary server side by sending an error anyway.
if (xfrin_session_ == NULL) {
- if (verbose_mode_) {
- cerr << "[b10-auth] "
- "session interface for xfrin is not available" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
return (false);
}
@@ -644,16 +635,12 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
int rcode;
parsed_answer = parseAnswer(rcode, answer);
if (rcode != 0) {
- if (verbose_mode_) {
- cerr << "[b10-auth] failed to notify Zonemgr: "
- << parsed_answer->str() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
+ .arg(parsed_answer->str());
return (false);
}
} catch (const Exception& ex) {
- if (verbose_mode_) {
- cerr << "[b10-auth] failed to notify Zonemgr: " << ex.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
return (false);
}
@@ -662,7 +649,11 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
message->setRcode(Rcode::NOERROR());
MessageRenderer renderer(*buffer);
- message->toWire(renderer);
+ if (tsig_context.get() != NULL) {
+ message->toWire(renderer, *tsig_context);
+ } else {
+ message->toWire(renderer);
+ }
return (true);
}
@@ -709,10 +700,7 @@ AuthSrvImpl::setDbFile(ConstElementPtr config) {
} else {
return (answer);
}
-
- if (verbose_mode_) {
- cerr << "[b10-auth] Data source database file: " << db_file_ << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file_);
// create SQL data source
// Note: the following step is tricky to be exception-safe and to ensure
@@ -742,9 +730,7 @@ AuthSrv::updateConfig(ConstElementPtr new_config) {
}
return (impl_->setDbFile(new_config));
} catch (const isc::Exception& error) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] error: " << error.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_CONFIG_UPDATE_FAIL).arg(error.what());
return (isc::config::createAnswer(1, error.what()));
}
}
@@ -772,3 +758,8 @@ void
AuthSrv::setDNSService(isc::asiodns::DNSService& dnss) {
dnss_ = &dnss;
}
+
+void
+AuthSrv::setTSIGKeyRing(const shared_ptr<TSIGKeyRing>* keyring) {
+ impl_->keyring_ = keyring;
+}
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 88f00c1..f2259a2 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -17,7 +17,7 @@
#include <string>
-// For MemoryDataSrcPtr below. This should be a temporary definition until
+// For InMemoryClientPtr below. This should be a temporary definition until
// we reorganize the data source framework.
#include <boost/shared_ptr.hpp>
@@ -39,11 +39,14 @@
namespace isc {
namespace datasrc {
-class MemoryDataSrc;
+class InMemoryClient;
}
namespace xfr {
class AbstractXfroutClient;
}
+namespace dns {
+class TSIGKeyRing;
+}
}
@@ -121,27 +124,6 @@ public:
isc::util::OutputBufferPtr buffer,
isc::asiodns::DNSServer* server);
- /// \brief Set verbose flag
- ///
- /// \param on The new value of the verbose flag
-
- /// \brief Enable or disable verbose logging.
- ///
- /// This method never throws an exception.
- ///
- /// \param on \c true to enable verbose logging; \c false to disable
- /// verbose logging.
- void setVerbose(const bool on);
-
- /// \brief Returns the logging verbosity of the \c AuthSrv object.
- ///
- /// This method never throws an exception.
- ///
- /// \return \c true if verbose logging is enabled; otherwise \c false.
-
- /// \brief Get the current value of the verbose flag
- bool getVerbose() const;
-
/// \brief Updates the data source for the \c AuthSrv object.
///
/// This method installs or replaces the data source that the \c AuthSrv
@@ -151,7 +133,7 @@ public:
/// If there is a data source installed, it will be replaced with the
/// new one.
///
- /// In the current implementation, the SQLite data source and MemoryDataSrc
+ /// In the current implementation, the SQLite data source and InMemoryClient
/// are assumed.
/// We can enable memory data source and get the path of SQLite database by
/// the \c config parameter. If we disabled memory data source, the SQLite
@@ -251,16 +233,16 @@ public:
///
void setXfrinSession(isc::cc::AbstractSession* xfrin_session);
- /// A shared pointer type for \c MemoryDataSrc.
+ /// A shared pointer type for \c InMemoryClient.
///
/// This is defined inside the \c AuthSrv class as it's supposed to be
/// a short term interface until we integrate the in-memory and other
/// data source frameworks.
- typedef boost::shared_ptr<isc::datasrc::MemoryDataSrc> MemoryDataSrcPtr;
+ typedef boost::shared_ptr<isc::datasrc::InMemoryClient> InMemoryClientPtr;
- /// An immutable shared pointer type for \c MemoryDataSrc.
- typedef boost::shared_ptr<const isc::datasrc::MemoryDataSrc>
- ConstMemoryDataSrcPtr;
+ /// An immutable shared pointer type for \c InMemoryClient.
+ typedef boost::shared_ptr<const isc::datasrc::InMemoryClient>
+ ConstInMemoryClientPtr;
/// Returns the in-memory data source configured for the \c AuthSrv,
/// if any.
@@ -278,11 +260,11 @@ public:
/// \param rrclass The RR class of the requested in-memory data source.
/// \return A pointer to the in-memory data source, if configured;
/// otherwise NULL.
- MemoryDataSrcPtr getMemoryDataSrc(const isc::dns::RRClass& rrclass);
+ InMemoryClientPtr getInMemoryClient(const isc::dns::RRClass& rrclass);
/// Sets or replaces the in-memory data source of the specified RR class.
///
- /// As noted in \c getMemoryDataSrc(), some RR classes may not be
+ /// As noted in \c getInMemoryClient(), some RR classes may not be
/// supported, in which case an exception of class \c InvalidParameter
/// will be thrown.
/// This method never throws an exception otherwise.
@@ -293,9 +275,9 @@ public:
/// in-memory data source.
///
/// \param rrclass The RR class of the in-memory data source to be set.
- /// \param memory_datasrc A (shared) pointer to \c MemoryDataSrc to be set.
- void setMemoryDataSrc(const isc::dns::RRClass& rrclass,
- MemoryDataSrcPtr memory_datasrc);
+ /// \param memory_datasrc A (shared) pointer to \c InMemoryClient to be set.
+ void setInMemoryClient(const isc::dns::RRClass& rrclass,
+ InMemoryClientPtr memory_client);
/// \brief Set the communication session with Statistics.
///
@@ -374,6 +356,14 @@ public:
/// \brief Assign an ASIO DNS Service queue to this Auth object
void setDNSService(isc::asiodns::DNSService& dnss);
+ /// \brief Sets the keyring used for verifying and signing
+ ///
+ /// The parameter is a pointer to a shared pointer, because the automatic
+ /// reloading routines of tsig keys replace the actual keyring object.
+ /// It is expected the pointer will point to some statically-allocated
+ /// object; it doesn't take ownership of it.
+ void setTSIGKeyRing(const boost::shared_ptr<isc::dns::TSIGKeyRing>*
+ keyring);
private:
AuthSrvImpl* impl_;
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
.RE
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIstatistics\-interval\fR
is the timer interval in seconds for
\fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
\fBshutdown\fR
exits
\fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
.SH "FILES"
.PP
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,15 +132,6 @@
</para>
<para>
- <varname>listen_on</varname> is a list of addresses and ports for
- <command>b10-auth</command> to listen on.
- The list items are the <varname>address</varname> string
- and <varname>port</varname> number.
- By default, <command>b10-auth</command> listens on port 53
- on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
- </para>
-
- <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>statistics-interval</varname> is the timer interval
in seconds for <command>b10-auth</command> to share its
statistics information to
@@ -209,6 +209,34 @@
</refsect1>
<refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>auth.queries.tcp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over TCP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>auth.queries.udp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over UDP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para>
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index a569147..d51495b 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -12,8 +12,12 @@ query_bench_SOURCES += ../query.h ../query.cc
query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
query_bench_SOURCES += ../auth_config.h ../auth_config.cc
query_bench_SOURCES += ../statistics.h ../statistics.cc
+query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+
+nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+query_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
query_bench_LDADD += $(top_builddir)/src/lib/bench/libbench.la
query_bench_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
@@ -26,3 +30,4 @@ query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
query_bench_LDADD += $(SQLITE_LIBS)
+
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index eafcae8..940d57b 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -27,16 +27,18 @@
#include <config/ccsession.h>
+#include <auth/auth_log.h>
#include <auth/auth_srv.h>
#include <auth/command.h>
-using namespace std;
-using boost::shared_ptr;
using boost::scoped_ptr;
-using namespace isc::dns;
+using boost::shared_ptr;
+using namespace isc::auth;
+using namespace isc::config;
using namespace isc::data;
using namespace isc::datasrc;
-using namespace isc::config;
+using namespace isc::dns;
+using namespace std;
namespace {
/// An exception that is thrown if an error occurs while handling a command
@@ -115,9 +117,7 @@ public:
class SendStatsCommand : public AuthCommand {
public:
virtual void exec(AuthSrv& server, isc::data::ConstElementPtr) {
- if (server.getVerbose()) {
- cerr << "[b10-auth] command 'sendstats' received" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_SENDSTATS);
server.submitStatistics();
}
};
@@ -136,22 +136,21 @@ public:
// that doesn't block other server operations.
// TODO: we may (should?) want to check the "last load time" and
// the timestamp of the file and skip loading if the file isn't newer.
- shared_ptr<MemoryZone> newzone(new MemoryZone(oldzone->getClass(),
- oldzone->getOrigin()));
- newzone->load(oldzone->getFileName());
- oldzone->swap(*newzone);
-
- if (server.getVerbose()) {
- cerr << "[b10-auth] Loaded zone '" << newzone->getOrigin()
- << "'/" << newzone->getClass() << endl;
- }
+ shared_ptr<InMemoryZoneFinder> zone_finder(
+ new InMemoryZoneFinder(old_zone_finder->getClass(),
+ old_zone_finder->getOrigin()));
+ zone_finder->load(old_zone_finder->getFileName());
+ old_zone_finder->swap(*zone_finder);
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
+ .arg(zone_finder->getOrigin()).arg(zone_finder->getClass());
}
private:
- shared_ptr<MemoryZone> oldzone; // zone to be updated with the new file.
+ // zone finder to be updated with the new file.
+ shared_ptr<InMemoryZoneFinder> old_zone_finder;
// A helper private method to parse and validate command parameters.
- // On success, it sets 'oldzone' to the zone to be updated.
+ // On success, it sets 'old_zone_finder' to the zone to be updated.
// It returns true if everything is okay; and false if the command is
// valid but there's no need for further process.
bool validate(AuthSrv& server, isc::data::ConstElementPtr args) {
@@ -164,10 +163,7 @@ private:
ConstElementPtr datasrc_elem = args->get("datasrc");
if (datasrc_elem) {
if (datasrc_elem->stringValue() == "sqlite3") {
- if (server.getVerbose()) {
- cerr << "[b10-auth] Nothing to do for loading sqlite3"
- << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_SQLITE3);
return (false);
} else if (datasrc_elem->stringValue() != "memory") {
// (note: at this point it's guaranteed that datasrc_elem
@@ -182,7 +178,7 @@ private:
const RRClass zone_class = class_elem ?
RRClass(class_elem->stringValue()) : RRClass::IN();
- AuthSrv::MemoryDataSrcPtr datasrc(server.getMemoryDataSrc(zone_class));
+ AuthSrv::InMemoryClientPtr datasrc(server.getInMemoryClient(zone_class));
if (datasrc == NULL) {
isc_throw(AuthCommandError, "Memory data source is disabled");
}
@@ -194,13 +190,14 @@ private:
const Name origin(origin_elem->stringValue());
// Get the current zone
- const MemoryDataSrc::FindResult result = datasrc->findZone(origin);
+ const InMemoryClient::FindResult result = datasrc->findZone(origin);
if (result.code != result::SUCCESS) {
isc_throw(AuthCommandError, "Zone " << origin <<
" is not found in data source");
}
- oldzone = boost::dynamic_pointer_cast<MemoryZone>(result.zone);
+ old_zone_finder = boost::dynamic_pointer_cast<InMemoryZoneFinder>(
+ result.zone_finder);
return (true);
}
@@ -233,18 +230,13 @@ ConstElementPtr
execAuthServerCommand(AuthSrv& server, const string& command_id,
ConstElementPtr args)
{
- if (server.getVerbose()) {
- cerr << "[b10-auth] Received '" << command_id << "' command" << endl;
- }
-
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_COMMAND).arg(command_id);
try {
scoped_ptr<AuthCommand>(createAuthCommand(command_id))->exec(server,
args);
} catch (const isc::Exception& ex) {
- if (server.getVerbose()) {
- cerr << "[b10-auth] Command '" << command_id
- << "' execution failed: " << ex.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_COMMAND_FAILED).arg(command_id)
+ .arg(ex.what());
return (createAnswer(1, ex.what()));
}
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 480c2f7..c8f6762 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,24 +44,26 @@
#include <auth/command.h>
#include <auth/change_user.h>
#include <auth/auth_srv.h>
+#include <auth/auth_log.h>
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
-#include <log/dummylog.h>
+#include <log/logger_support.h>
+#include <server_common/keyring.h>
using namespace std;
-using namespace isc::data;
+using namespace isc::asiodns;
+using namespace isc::asiolink;
+using namespace isc::auth;
using namespace isc::cc;
using namespace isc::config;
+using namespace isc::data;
using namespace isc::dns;
+using namespace isc::log;
using namespace isc::util;
using namespace isc::xfr;
-using namespace isc::asiolink;
-using namespace isc::asiodns;
namespace {
-bool verbose_mode = false;
-
/* need global var for config/command handlers.
* todo: turn this around, and put handlers in the authserver
* class itself? */
@@ -87,6 +89,7 @@ usage() {
cerr << "\t-v: verbose output" << endl;
exit(1);
}
+
} // end of anonymous namespace
int
@@ -94,6 +97,7 @@ main(int argc, char* argv[]) {
int ch;
const char* uid = NULL;
bool cache = true;
+ bool verbose = false;
while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
switch (ch) {
@@ -104,8 +108,7 @@ main(int argc, char* argv[]) {
uid = optarg;
break;
case 'v':
- verbose_mode = true;
- isc::log::denabled = true;
+ verbose = true;
break;
case '?':
default:
@@ -117,6 +120,11 @@ main(int argc, char* argv[]) {
usage();
}
+ // Initialize logging. If verbose, we'll use maximum verbosity.
+ isc::log::initLogger("b10-auth",
+ (verbose ? isc::log::DEBUG : isc::log::INFO),
+ isc::log::MAX_DEBUG_LEVEL, NULL);
+
int ret = 0;
// XXX: we should eventually pass io_service here.
@@ -137,8 +145,7 @@ main(int argc, char* argv[]) {
}
auth_server = new AuthSrv(cache, xfrout_client);
- auth_server->setVerbose(verbose_mode);
- cout << "[b10-auth] Server created." << endl;
+ LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
SimpleCallback* checkin = auth_server->getCheckinProvider();
IOService& io_service = auth_server->getIOService();
@@ -147,27 +154,32 @@ main(int argc, char* argv[]) {
DNSService dns_service(io_service, checkin, lookup, answer);
auth_server->setDNSService(dns_service);
- cout << "[b10-auth] DNSServices created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
cc_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Configuration session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
+ // We delay starting listening to new commands/config just before we
+ // go into the main loop to avoid confusion due to mixture of
+ // synchronous and asynchronous operations (this would happen in
+ // initializing TSIG keys below). Until then all operations on the
+ // CC session will take place synchronously.
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
- my_command_handler);
- cout << "[b10-auth] Configuration channel established." << endl;
+ my_command_handler, false);
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
xfrin_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Xfrin session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
xfrin_session->establish(NULL);
xfrin_session_established = true;
- cout << "[b10-auth] Xfrin session channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
statistics_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Statistics session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_CREATED);
statistics_session->establish(NULL);
statistics_session_established = true;
- cout << "[b10-auth] Statistics session channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_ESTABLISHED);
auth_server->setXfrinSession(xfrin_session);
auth_server->setStatisticsSession(statistics_session);
@@ -176,25 +188,34 @@ main(int argc, char* argv[]) {
// all initial configurations, but as a short term workaround we
// handle the traditional "database_file" setup by directly calling
// updateConfig().
- // if server load configure failed, we won't exit, give user second chance
- // to correct the configure.
+ // if server load configure failed, we won't exit, give user second
+ // chance to correct the configure.
auth_server->setConfigSession(config_session);
try {
configureAuthServer(*auth_server, config_session->getFullConfig());
auth_server->updateConfig(ElementPtr());
} catch (const AuthConfigError& ex) {
- cout << "[bin10-auth] Server load config failed:" << ex.what() << endl;
+ LOG_ERROR(auth_logger, AUTH_CONFIG_LOAD_FAIL).arg(ex.what());
}
if (uid != NULL) {
changeUser(uid);
}
- cout << "[b10-auth] Server started." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_LOAD_TSIG);
+ isc::server_common::initKeyring(*config_session);
+ auth_server->setTSIGKeyRing(&isc::server_common::keyring);
+
+ // Now start asynchronous read.
+ config_session->start();
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_STARTED);
+
+ // Successfully initialized.
+ LOG_INFO(auth_logger, AUTH_SERVER_STARTED);
io_service.run();
} catch (const std::exception& ex) {
- cerr << "[b10-auth] Server failed: " << ex.what() << endl;
+ LOG_FATAL(auth_logger, AUTH_SERVER_FAILED).arg(ex.what());
ret = 1;
}
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 323f890..ab6404e 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -19,7 +19,7 @@
#include <dns/rcode.h>
#include <dns/rdataclass.h>
-#include <datasrc/memory_datasrc.h>
+#include <datasrc/client.h>
#include <auth/query.h>
@@ -31,14 +31,14 @@ namespace isc {
namespace auth {
void
-Query::getAdditional(const Zone& zone, const RRset& rrset) const {
+Query::getAdditional(ZoneFinder& zone, const RRset& rrset) const {
RdataIteratorPtr rdata_iterator(rrset.getRdataIterator());
for (; !rdata_iterator->isLast(); rdata_iterator->next()) {
const Rdata& rdata(rdata_iterator->getCurrent());
if (rrset.getType() == RRType::NS()) {
// Need to perform the search in the "GLUE OK" mode.
const generic::NS& ns = dynamic_cast<const generic::NS&>(rdata);
- findAddrs(zone, ns.getNSName(), Zone::FIND_GLUE_OK);
+ findAddrs(zone, ns.getNSName(), ZoneFinder::FIND_GLUE_OK);
} else if (rrset.getType() == RRType::MX()) {
const generic::MX& mx(dynamic_cast<const generic::MX&>(rdata));
findAddrs(zone, mx.getMXName());
@@ -47,8 +47,8 @@ Query::getAdditional(const Zone& zone, const RRset& rrset) const {
}
void
-Query::findAddrs(const Zone& zone, const Name& qname,
- const Zone::FindOptions options) const
+Query::findAddrs(ZoneFinder& zone, const Name& qname,
+ const ZoneFinder::FindOptions options) const
{
// Out of zone name
NameComparisonResult result = zone.getOrigin().compare(qname);
@@ -66,30 +66,31 @@ Query::findAddrs(const Zone& zone, const Name& qname,
// Find A rrset
if (qname_ != qname || qtype_ != RRType::A()) {
- Zone::FindResult a_result = zone.find(qname, RRType::A(), NULL,
- options);
- if (a_result.code == Zone::SUCCESS) {
+ ZoneFinder::FindResult a_result = zone.find(qname, RRType::A(), NULL,
+ options | dnssec_opt_);
+ if (a_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(a_result.rrset));
+ boost::const_pointer_cast<RRset>(a_result.rrset), dnssec_);
}
}
// Find AAAA rrset
if (qname_ != qname || qtype_ != RRType::AAAA()) {
- Zone::FindResult aaaa_result =
- zone.find(qname, RRType::AAAA(), NULL, options);
- if (aaaa_result.code == Zone::SUCCESS) {
+ ZoneFinder::FindResult aaaa_result =
+ zone.find(qname, RRType::AAAA(), NULL, options | dnssec_opt_);
+ if (aaaa_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(aaaa_result.rrset));
+ boost::const_pointer_cast<RRset>(aaaa_result.rrset),
+ dnssec_);
}
}
}
void
-Query::putSOA(const Zone& zone) const {
- Zone::FindResult soa_result(zone.find(zone.getOrigin(),
- RRType::SOA()));
- if (soa_result.code != Zone::SUCCESS) {
+Query::putSOA(ZoneFinder& zone) const {
+ ZoneFinder::FindResult soa_result(zone.find(zone.getOrigin(),
+ RRType::SOA(), NULL, dnssec_opt_));
+ if (soa_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoSOA, "There's no SOA record in zone " <<
zone.getOrigin().toText());
} else {
@@ -99,21 +100,23 @@ Query::putSOA(const Zone& zone) const {
* to insist.
*/
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(soa_result.rrset));
+ boost::const_pointer_cast<RRset>(soa_result.rrset), dnssec_);
}
}
void
-Query::getAuthAdditional(const Zone& zone) const {
+Query::getAuthAdditional(ZoneFinder& zone) const {
// Fill in authority and additional sections.
- Zone::FindResult ns_result = zone.find(zone.getOrigin(), RRType::NS());
+ ZoneFinder::FindResult ns_result = zone.find(zone.getOrigin(),
+ RRType::NS(), NULL,
+ dnssec_opt_);
// zone origin name should have NS records
- if (ns_result.code != Zone::SUCCESS) {
+ if (ns_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoApexNS, "There's no apex NS records in zone " <<
zone.getOrigin().toText());
} else {
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(ns_result.rrset));
+ boost::const_pointer_cast<RRset>(ns_result.rrset), dnssec_);
// Handle additional for authority section
getAdditional(zone, *ns_result.rrset);
}
@@ -125,8 +128,8 @@ Query::process() const {
const bool qtype_is_any = (qtype_ == RRType::ANY());
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
- const MemoryDataSrc::FindResult result =
- memory_datasrc_.findZone(qname_);
+ const DataSourceClient::FindResult result =
+ datasrc_client_.findZone(qname_);
// If we have no matching authoritative zone for the query name, return
// REFUSED. In short, this is to be compatible with BIND 9, but the
@@ -145,14 +148,15 @@ Query::process() const {
while (keep_doing) {
keep_doing = false;
std::auto_ptr<RRsetList> target(qtype_is_any ? new RRsetList : NULL);
- const Zone::FindResult db_result(result.zone->find(qname_, qtype_,
- target.get()));
-
+ const ZoneFinder::FindResult db_result(
+ result.zone_finder->find(qname_, qtype_, target.get(),
+ dnssec_opt_));
switch (db_result.code) {
- case Zone::DNAME: {
+ case ZoneFinder::DNAME: {
// First, put the dname into the answer
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
/*
* Empty DNAME should never get in, as it is impossible to
* create one in master file.
@@ -188,10 +192,10 @@ Query::process() const {
qname_.getLabelCount() -
db_result.rrset->getName().getLabelCount()).
concatenate(dname.getDname())));
- response_.addRRset(Message::SECTION_ANSWER, cname);
+ response_.addRRset(Message::SECTION_ANSWER, cname, dnssec_);
break;
}
- case Zone::CNAME:
+ case ZoneFinder::CNAME:
/*
* We don't do chaining yet. Therefore handling a CNAME is
* mostly the same as handling SUCCESS, but we didn't get
@@ -202,48 +206,59 @@ Query::process() const {
* So, just put it there.
*/
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
break;
- case Zone::SUCCESS:
+ case ZoneFinder::SUCCESS:
if (qtype_is_any) {
// If query type is ANY, insert all RRs under the domain
// into answer section.
BOOST_FOREACH(RRsetPtr rrset, *target) {
- response_.addRRset(Message::SECTION_ANSWER, rrset);
+ response_.addRRset(Message::SECTION_ANSWER, rrset,
+ dnssec_);
// Handle additional for answer section
- getAdditional(*result.zone, *rrset.get());
+ getAdditional(*result.zone_finder, *rrset.get());
}
} else {
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
// Handle additional for answer section
- getAdditional(*result.zone, *db_result.rrset);
+ getAdditional(*result.zone_finder, *db_result.rrset);
}
// If apex NS records haven't been provided in the answer
// section, insert apex NS records into the authority section
// and AAAA/A RRS of each of the NS RDATA into the additional
// section.
- if (qname_ != result.zone->getOrigin() ||
- db_result.code != Zone::SUCCESS ||
+ if (qname_ != result.zone_finder->getOrigin() ||
+ db_result.code != ZoneFinder::SUCCESS ||
(qtype_ != RRType::NS() && !qtype_is_any))
{
- getAuthAdditional(*result.zone);
+ getAuthAdditional(*result.zone_finder);
}
break;
- case Zone::DELEGATION:
+ case ZoneFinder::DELEGATION:
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(db_result.rrset));
- getAdditional(*result.zone, *db_result.rrset);
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
+ getAdditional(*result.zone_finder, *db_result.rrset);
break;
- case Zone::NXDOMAIN:
+ case ZoneFinder::NXDOMAIN:
// Just empty answer with SOA in authority section
response_.setRcode(Rcode::NXDOMAIN());
- putSOA(*result.zone);
+ putSOA(*result.zone_finder);
break;
- case Zone::NXRRSET:
+ case ZoneFinder::NXRRSET:
// Just empty answer with SOA in authority section
- putSOA(*result.zone);
+ putSOA(*result.zone_finder);
+ break;
+ default:
+ // These are new result codes (WILDCARD and WILDCARD_NXRRSET)
+ // They should not happen from the in-memory and the database
+ // backend isn't used yet.
+ // TODO: Implement before letting the database backends in
+ isc_throw(isc::NotImplemented, "Unknown result code");
break;
}
}
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index e0c6323..0ebbed8 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -26,7 +26,7 @@ class RRset;
}
namespace datasrc {
-class MemoryDataSrc;
+class DataSourceClient;
}
namespace auth {
@@ -36,10 +36,8 @@ namespace auth {
///
/// Many of the design details for this class are still in flux.
/// We'll revisit and update them as we add more functionality, for example:
-/// - memory_datasrc parameter of the constructor. It is a data source that
-/// uses in memory dedicated backend.
/// - as a related point, we may have to pass the RR class of the query.
-/// in the initial implementation the RR class is an attribute of memory
+/// in the initial implementation the RR class is an attribute of
/// datasource and omitted. It's not clear if this assumption holds with
/// generic data sources. On the other hand, it will help keep
/// implementation simpler, and we might rather want to modify the design
@@ -51,7 +49,7 @@ namespace auth {
/// separate attribute setter.
/// - likewise, we'll eventually need to do per zone access control, for which
/// we need querier's information such as its IP address.
-/// - memory_datasrc and response may better be parameters to process() instead
+/// - datasrc_client and response may better be parameters to process() instead
/// of the constructor.
///
/// <b>Note:</b> The class name is intentionally the same as the one used in
@@ -71,7 +69,7 @@ private:
/// Adds a SOA of the zone into the authority zone of response_.
/// Can throw NoSOA.
///
- void putSOA(const isc::datasrc::Zone& zone) const;
+ void putSOA(isc::datasrc::ZoneFinder& zone) const;
/// \brief Look up additional data (i.e., address records for the names
/// included in NS or MX records).
@@ -83,11 +81,11 @@ private:
/// This method may throw a exception because its underlying methods may
/// throw exceptions.
///
- /// \param zone The Zone wherein the additional data to the query is bo be
- /// found.
+ /// \param zone The ZoneFinder through which the additional data for the
+ /// query is to be found.
/// \param rrset The RRset (i.e., NS or MX rrset) which require additional
/// processing.
- void getAdditional(const isc::datasrc::Zone& zone,
+ void getAdditional(isc::datasrc::ZoneFinder& zone,
const isc::dns::RRset& rrset) const;
/// \brief Find address records for a specified name.
@@ -102,18 +100,19 @@ private:
/// The glue records must exactly match the name in the NS RDATA, without
/// CNAME or wildcard processing.
///
- /// \param zone The \c Zone wherein the address records is to be found.
+ /// \param zone The \c ZoneFinder through which the address records is to
+ /// be found.
/// \param qname The name in rrset RDATA.
/// \param options The search options.
- void findAddrs(const isc::datasrc::Zone& zone,
+ void findAddrs(isc::datasrc::ZoneFinder& zone,
const isc::dns::Name& qname,
- const isc::datasrc::Zone::FindOptions options
- = isc::datasrc::Zone::FIND_DEFAULT) const;
+ const isc::datasrc::ZoneFinder::FindOptions options
+ = isc::datasrc::ZoneFinder::FIND_DEFAULT) const;
- /// \brief Look up \c Zone's NS and address records for the NS RDATA
- /// (domain name) for authoritative answer.
+ /// \brief Look up a zone's NS RRset and their address records for an
+ /// authoritative answer.
///
- /// On returning an authoritative answer, insert the \c Zone's NS into the
+ /// On returning an authoritative answer, insert a zone's NS into the
/// authority section and AAAA/A RRs of each of the NS RDATA into the
/// additional section.
///
@@ -126,25 +125,29 @@ private:
/// include AAAA/A RRs under a zone cut in additional section. (BIND 9
/// excludes under-cut RRs; NSD include them.)
///
- /// \param zone The \c Zone wherein the additional data to the query is to
- /// be found.
- void getAuthAdditional(const isc::datasrc::Zone& zone) const;
+ /// \param zone The \c ZoneFinder through which the NS and additional data
+ /// for the query are to be found.
+ void getAuthAdditional(isc::datasrc::ZoneFinder& zone) const;
public:
/// Constructor from query parameters.
///
/// This constructor never throws an exception.
///
- /// \param memory_datasrc The memory datasource wherein the answer to the query is
+ /// \param datasrc_client The datasource wherein the answer to the query is
/// to be found.
/// \param qname The query name
/// \param qtype The RR type of the query
/// \param response The response message to store the answer to the query.
- Query(const isc::datasrc::MemoryDataSrc& memory_datasrc,
+ /// \param dnssec If the answer should include signatures and NSEC/NSEC3 if
+ /// possible.
+ Query(const isc::datasrc::DataSourceClient& datasrc_client,
const isc::dns::Name& qname, const isc::dns::RRType& qtype,
- isc::dns::Message& response) :
- memory_datasrc_(memory_datasrc), qname_(qname), qtype_(qtype),
- response_(response)
+ isc::dns::Message& response, bool dnssec = false) :
+ datasrc_client_(datasrc_client), qname_(qname), qtype_(qtype),
+ response_(response), dnssec_(dnssec),
+ dnssec_opt_(dnssec ? isc::datasrc::ZoneFinder::FIND_DNSSEC :
+ isc::datasrc::ZoneFinder::FIND_DEFAULT)
{}
/// Process the query.
@@ -157,7 +160,7 @@ public:
/// successful search would result in adding a corresponding RRset to
/// the answer section of the response.
///
- /// If no matching zone is found in the memory datasource, the RCODE of
+ /// If no matching zone is found in the datasource, the RCODE of
/// SERVFAIL will be set in the response.
/// <b>Note:</b> this is different from the error code that BIND 9 returns
/// by default when it's configured as an authoritative-only server (and
@@ -208,10 +211,12 @@ public:
};
private:
- const isc::datasrc::MemoryDataSrc& memory_datasrc_;
+ const isc::datasrc::DataSourceClient& datasrc_client_;
const isc::dns::Name& qname_;
const isc::dns::RRType& qtype_;
isc::dns::Message& response_;
+ const bool dnssec_;
+ const isc::datasrc::ZoneFinder::FindOptions dnssec_opt_;
};
}
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index e68793c..76e5007 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <auth/statistics.h>
+#include <auth/auth_log.h>
#include <cc/data.h>
#include <cc/session.h>
@@ -20,6 +21,8 @@
#include <sstream>
#include <iostream>
+using namespace isc::auth;
+
// TODO: We need a namespace ("auth_server"?) to hold
// AuthSrv and AuthCounters.
@@ -29,10 +32,7 @@ private:
AuthCountersImpl(const AuthCountersImpl& source);
AuthCountersImpl& operator=(const AuthCountersImpl& source);
public:
- // References verbose_mode flag in AuthSrvImpl
- // TODO: Fix this short term workaround for logging
- // after we have logging framework
- AuthCountersImpl(const bool& verbose_mode);
+ AuthCountersImpl();
~AuthCountersImpl();
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
@@ -42,15 +42,13 @@ public:
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
- const bool& verbose_mode_;
};
-AuthCountersImpl::AuthCountersImpl(const bool& verbose_mode) :
+AuthCountersImpl::AuthCountersImpl() :
// initialize counter
// size: AuthCounters::COUNTER_TYPES, initial value: 0
counters_(AuthCounters::COUNTER_TYPES, 0),
- statistics_session_(NULL),
- verbose_mode_(verbose_mode)
+ statistics_session_(NULL)
{}
AuthCountersImpl::~AuthCountersImpl()
@@ -64,11 +62,7 @@ AuthCountersImpl::inc(const AuthCounters::CounterType type) {
bool
AuthCountersImpl::submitStatistics() const {
if (statistics_session_ == NULL) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "session interface for statistics"
- << " is not available" << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_NO_STATS_SESSION);
return (false);
}
std::stringstream statistics_string;
@@ -91,26 +85,14 @@ AuthCountersImpl::submitStatistics() const {
const int seq =
statistics_session_->group_sendmsg(statistics_element, "Stats");
isc::data::ConstElementPtr env, answer;
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "send statistics data" << std::endl;
- }
// TODO: parse and check response from statistics module
// currently it just returns empty message
statistics_session_->group_recvmsg(env, answer, false, seq);
} catch (const isc::cc::SessionError& ex) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "communication error in sending statistics data: "
- << ex.what() << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_STATS_COMMS).arg(ex.what());
return (false);
} catch (const isc::cc::SessionTimeout& ex) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "timeout happened while sending statistics data: "
- << ex.what() << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_STATS_TIMEOUT).arg(ex.what());
return (false);
}
return (true);
@@ -129,8 +111,7 @@ AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
return (counters_.at(type));
}
-AuthCounters::AuthCounters(const bool& verbose_mode) :
- impl_(new AuthCountersImpl(verbose_mode))
+AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
{}
AuthCounters::~AuthCounters() {
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 9e5240e..5bf6436 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -61,15 +61,10 @@ public:
};
/// The constructor.
///
- /// \param verbose_mode reference to verbose_mode_ of AuthSrvImpl
- ///
/// This constructor is mostly exception free. But it may still throw
/// a standard exception if memory allocation fails inside the method.
///
- /// \todo Fix this short term workaround for logging
- /// after we have logging framework.
- ///
- AuthCounters(const bool& verbose_mode);
+ AuthCounters();
/// The destructor.
///
/// This method never throws an exception.
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index 050373a..5cd2f5a 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -22,6 +22,7 @@ TESTS += run_unittests
run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
run_unittests_SOURCES += ../auth_srv.h ../auth_srv.cc
+run_unittests_SOURCES += ../auth_log.h ../auth_log.cc
run_unittests_SOURCES += ../query.h ../query.cc
run_unittests_SOURCES += ../change_user.h ../change_user.cc
run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
@@ -36,6 +37,9 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += change_user_unittest.cc
run_unittests_SOURCES += statistics_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
@@ -43,6 +47,7 @@ run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
@@ -52,6 +57,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index a77f7e6..4698588 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -16,6 +16,8 @@
#include <vector>
+#include <boost/shared_ptr.hpp>
+
#include <gtest/gtest.h>
#include <dns/message.h>
@@ -25,8 +27,10 @@
#include <dns/rrtype.h>
#include <dns/rrttl.h>
#include <dns/rdataclass.h>
+#include <dns/tsig.h>
#include <server_common/portconfig.h>
+#include <server_common/keyring.h>
#include <datasrc/memory_datasrc.h>
#include <auth/auth_srv.h>
@@ -50,6 +54,7 @@ using namespace isc::asiolink;
using namespace isc::testutils;
using namespace isc::server_common::portconfig;
using isc::UnitTestUtil;
+using boost::shared_ptr;
namespace {
const char* const CONFIG_TESTDB =
@@ -185,15 +190,6 @@ TEST_F(AuthSrvTest, unsupportedRequest) {
unsupportedRequest();
}
-// Simple API check
-TEST_F(AuthSrvTest, verbose) {
- EXPECT_FALSE(server.getVerbose());
- server.setVerbose(true);
- EXPECT_TRUE(server.getVerbose());
- server.setVerbose(false);
- EXPECT_FALSE(server.getVerbose());
-}
-
// Multiple questions. Should result in FORMERR.
TEST_F(AuthSrvTest, multiQuestion) {
multiQuestion();
@@ -242,6 +238,139 @@ TEST_F(AuthSrvTest, AXFRSuccess) {
EXPECT_TRUE(xfrout.isConnected());
}
+// Try giving the server a TSIG signed request and see it can answer signed as
+// well
+TEST_F(AuthSrvTest, TSIGSigned) {
+ // Prepare key, the client message, etc
+ const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("version.bind"), RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Run the message through the server
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ keyring->add(key);
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ // What did we get?
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::NOERROR(),
+ opcode.getCode(), QR_FLAG | AA_FLAG, 1, 1, 1, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) << "Missing TSIG signature";
+ TSIGError error(context.verify(tsig, response_obuffer->getData(),
+ response_obuffer->getLength()));
+ EXPECT_EQ(TSIGError::NOERROR(), error) <<
+ "The server signed the response, but it doesn't seem to be valid";
+}
+
+// Give the server a signed request, but don't give it the key. It will
+// not be able to verify it, returning BADKEY
+TEST_F(AuthSrvTest, TSIGSignedBadKey) {
+ TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("version.bind"), RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Process the message, but use a different key there
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, TSIGError::BAD_KEY().toRcode(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) <<
+ "Missing TSIG signature (we should have one even at error)";
+ EXPECT_EQ(TSIGError::BAD_KEY_CODE, tsig->getRdata().getError());
+ EXPECT_EQ(0, tsig->getRdata().getMACSize()) <<
+ "It should be unsigned with this error";
+}
+
+// Give the server a signed request, but signed by a different key
+// (with the same name). It should return BADSIG
+TEST_F(AuthSrvTest, TSIGBadSig) {
+ TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("version.bind"), RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Process the message, but use a different key there
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ keyring->add(TSIGKey("key:QkFECg==:hmac-sha1"));
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, TSIGError::BAD_SIG().toRcode(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) <<
+ "Missing TSIG signature (we should have one even at error)";
+ EXPECT_EQ(TSIGError::BAD_SIG_CODE, tsig->getRdata().getError());
+ EXPECT_EQ(0, tsig->getRdata().getMACSize()) <<
+ "It should be unsigned with this error";
+}
+
+// Give the server a signed unsupported request with a bad signature.
+// This checks the server first verifies the signature before anything
+// else.
+TEST_F(AuthSrvTest, TSIGCheckFirst) {
+ TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ // Pass a wrong opcode there. The server shouldn't know what to do
+ // about it.
+ UnitTestUtil::createRequestMessage(request_message, Opcode::RESERVED14(),
+ default_qid, Name("version.bind"),
+ RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Process the message, but use a different key there
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ keyring->add(TSIGKey("key:QkFECg==:hmac-sha1"));
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, TSIGError::BAD_SIG().toRcode(),
+ Opcode::RESERVED14().getCode(), QR_FLAG, 0, 0, 0, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) <<
+ "Missing TSIG signature (we should have one even at error)";
+ EXPECT_EQ(TSIGError::BAD_SIG_CODE, tsig->getRdata().getError());
+ EXPECT_EQ(0, tsig->getRdata().getMACSize()) <<
+ "It should be unsigned with this error";
+}
+
TEST_F(AuthSrvTest, AXFRConnectFail) {
EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
xfrout.disableConnect();
@@ -522,17 +651,17 @@ TEST_F(AuthSrvTest, updateConfigFail) {
QR_FLAG | AA_FLAG, 1, 1, 1, 0);
}
-TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, updateWithInMemoryClient) {
// Test configuring memory data source. Detailed test cases are covered
// in the configuration tests. We only check the AuthSrv interface here.
// By default memory data source isn't enabled
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
updateConfig(&server,
"{\"datasources\": [{\"type\": \"memory\"}]}", true);
// after successful configuration, we should have one (with empty zoneset).
- ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
// The memory data source is empty, should return REFUSED rcode.
createDataFromFile("examplequery_fromWire.wire");
@@ -543,7 +672,7 @@ TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
}
-TEST_F(AuthSrvTest, chQueryWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, chQueryWithInMemoryClient) {
// Configure memory data source for class IN
updateConfig(&server, "{\"datasources\": "
"[{\"class\": \"IN\", \"type\": \"memory\"}]}", true);
diff --git a/src/bin/auth/tests/command_unittest.cc b/src/bin/auth/tests/command_unittest.cc
index 3fdd086..8a82367 100644
--- a/src/bin/auth/tests/command_unittest.cc
+++ b/src/bin/auth/tests/command_unittest.cc
@@ -48,9 +48,9 @@ using namespace isc::datasrc;
using namespace isc::config;
namespace {
-class AuthConmmandTest : public ::testing::Test {
+class AuthCommandTest : public ::testing::Test {
protected:
- AuthConmmandTest() : server(false, xfrout), rcode(-1) {
+ AuthCommandTest() : server(false, xfrout), rcode(-1) {
server.setStatisticsSession(&statistics_session);
}
void checkAnswer(const int expected_code) {
@@ -60,21 +60,20 @@ protected:
MockSession statistics_session;
MockXfroutClient xfrout;
AuthSrv server;
- AuthSrv::ConstMemoryDataSrcPtr memory_datasrc;
ConstElementPtr result;
int rcode;
public:
void stopServer(); // need to be public for boost::bind
};
-TEST_F(AuthConmmandTest, unknownCommand) {
+TEST_F(AuthCommandTest, unknownCommand) {
result = execAuthServerCommand(server, "no_such_command",
ConstElementPtr());
parseAnswer(rcode, result);
EXPECT_EQ(1, rcode);
}
-TEST_F(AuthConmmandTest, DISABLED_unexpectedException) {
+TEST_F(AuthCommandTest, DISABLED_unexpectedException) {
// execAuthServerCommand() won't catch standard exceptions.
// Skip this test for now: ModuleCCSession doesn't seem to validate
// commands.
@@ -83,7 +82,7 @@ TEST_F(AuthConmmandTest, DISABLED_unexpectedException) {
runtime_error);
}
-TEST_F(AuthConmmandTest, sendStatistics) {
+TEST_F(AuthCommandTest, sendStatistics) {
result = execAuthServerCommand(server, "sendstats", ConstElementPtr());
// Just check some message has been sent. Detailed tests specific to
// statistics are done in its own tests.
@@ -92,15 +91,15 @@ TEST_F(AuthConmmandTest, sendStatistics) {
}
void
-AuthConmmandTest::stopServer() {
+AuthCommandTest::stopServer() {
result = execAuthServerCommand(server, "shutdown", ConstElementPtr());
parseAnswer(rcode, result);
assert(rcode == 0); // make sure the test stops when something is wrong
}
-TEST_F(AuthConmmandTest, shutdown) {
+TEST_F(AuthCommandTest, shutdown) {
isc::asiolink::IntervalTimer itimer(server.getIOService());
- itimer.setup(boost::bind(&AuthConmmandTest::stopServer, this), 1);
+ itimer.setup(boost::bind(&AuthCommandTest::stopServer, this), 1);
server.getIOService().run();
EXPECT_EQ(0, rcode);
}
@@ -110,18 +109,18 @@ TEST_F(AuthConmmandTest, shutdown) {
// zones, and checks the zones are correctly loaded.
void
zoneChecks(AuthSrv& server) {
- EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::AAAA()).code);
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::AAAA()).code);
}
@@ -147,25 +146,25 @@ configureZones(AuthSrv& server) {
void
newZoneChecks(AuthSrv& server) {
- EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::A()).code);
// now test1.example should have ns/AAAA
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::AAAA()).code);
// test2.example shouldn't change
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::AAAA()).code);
}
-TEST_F(AuthConmmandTest, loadZone) {
+TEST_F(AuthCommandTest, loadZone) {
configureZones(server);
ASSERT_EQ(0, system(INSTALL_PROG " " TEST_DATA_DIR
@@ -182,7 +181,7 @@ TEST_F(AuthConmmandTest, loadZone) {
newZoneChecks(server);
}
-TEST_F(AuthConmmandTest, loadBrokenZone) {
+TEST_F(AuthCommandTest, loadBrokenZone) {
configureZones(server);
ASSERT_EQ(0, system(INSTALL_PROG " " TEST_DATA_DIR
@@ -195,7 +194,7 @@ TEST_F(AuthConmmandTest, loadBrokenZone) {
zoneChecks(server); // zone shouldn't be replaced
}
-TEST_F(AuthConmmandTest, loadUnreadableZone) {
+TEST_F(AuthCommandTest, loadUnreadableZone) {
configureZones(server);
// install the zone file as unreadable
@@ -209,7 +208,7 @@ TEST_F(AuthConmmandTest, loadUnreadableZone) {
zoneChecks(server); // zone shouldn't be replaced
}
-TEST_F(AuthConmmandTest, loadZoneWithoutDataSrc) {
+TEST_F(AuthCommandTest, loadZoneWithoutDataSrc) {
// try to execute load command without configuring the zone beforehand.
// it should fail.
result = execAuthServerCommand(server, "loadzone",
@@ -218,7 +217,7 @@ TEST_F(AuthConmmandTest, loadZoneWithoutDataSrc) {
checkAnswer(1);
}
-TEST_F(AuthConmmandTest, loadSqlite3DataSrc) {
+TEST_F(AuthCommandTest, loadSqlite3DataSrc) {
// For sqlite3 data source we don't have to do anything (the data source
// (re)loads itself automatically)
result = execAuthServerCommand(server, "loadzone",
@@ -228,7 +227,7 @@ TEST_F(AuthConmmandTest, loadSqlite3DataSrc) {
checkAnswer(0);
}
-TEST_F(AuthConmmandTest, loadZoneInvalidParams) {
+TEST_F(AuthCommandTest, loadZoneInvalidParams) {
configureZones(server);
// null arg
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index 7658b84..dadb0ee 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -57,12 +57,12 @@ protected:
TEST_F(AuthConfigTest, datasourceConfig) {
// By default, we don't have any in-memory data source.
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
configureAuthServer(server, Element::fromJSON(
"{\"datasources\": [{\"type\": \"memory\"}]}"));
// after successful configuration, we should have one (with empty zoneset).
- ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(AuthConfigTest, databaseConfig) {
@@ -74,8 +74,15 @@ TEST_F(AuthConfigTest, databaseConfig) {
"{\"database_file\": \"should_be_ignored\"}")));
}
+TEST_F(AuthConfigTest, versionConfig) {
+ // make sure it does not throw on 'version'
+ EXPECT_NO_THROW(configureAuthServer(
+ server,
+ Element::fromJSON("{\"version\": 0}")));
+}
+
TEST_F(AuthConfigTest, exceptionGuarantee) {
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
// This configuration contains an invalid item, which will trigger
// an exception.
EXPECT_THROW(configureAuthServer(
@@ -85,7 +92,7 @@ TEST_F(AuthConfigTest, exceptionGuarantee) {
" \"no_such_config_var\": 1}")),
AuthConfigError);
// The server state shouldn't change
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(AuthConfigTest, exceptionConversion) {
@@ -147,22 +154,22 @@ protected:
TEST_F(MemoryDatasrcConfigTest, addZeroDataSrc) {
parser->build(Element::fromJSON("[]"));
parser->commit();
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(MemoryDatasrcConfigTest, addEmpty) {
// By default, we don't have any in-memory data source.
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
parser->build(Element::fromJSON("[{\"type\": \"memory\"}]"));
parser->commit();
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, addZeroZone) {
parser->build(Element::fromJSON("[{\"type\": \"memory\","
" \"zones\": []}]"));
parser->commit();
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, addOneZone) {
@@ -172,10 +179,10 @@ TEST_F(MemoryDatasrcConfigTest, addOneZone) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
// Check it actually loaded something
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(rrclass)->findZone(
- Name("ns.example.com.")).zone->find(Name("ns.example.com."),
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(rrclass)->findZone(
+ Name("ns.example.com.")).zone_finder->find(Name("ns.example.com."),
RRType::A()).code);
}
@@ -192,7 +199,7 @@ TEST_F(MemoryDatasrcConfigTest, addMultiZones) {
" \"file\": \"" TEST_DATA_DIR
"/example.net.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(3, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(3, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, replace) {
@@ -202,9 +209,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
// create a new parser, and install a new set of configuration. It
@@ -220,9 +227,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
" \"file\": \"" TEST_DATA_DIR
"/example.net.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(2, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(2, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::NOTFOUND,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
}
@@ -234,9 +241,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
// create a new parser, and try to load something. It will throw,
@@ -255,9 +262,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
// commit it
// The original should be untouched
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
}
@@ -268,13 +275,13 @@ TEST_F(MemoryDatasrcConfigTest, remove) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
delete parser;
parser = createAuthConfigParser(server, "datasources");
EXPECT_NO_THROW(parser->build(Element::fromJSON("[]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(MemoryDatasrcConfigTest, adDuplicateZones) {
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index c68b672..b2d1094 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -93,9 +93,9 @@ const char* const other_zone_rrs =
"mx.delegation.example.com. 3600 IN A 192.0.2.100\n";
// This is a mock Zone class for testing.
-// It is a derived class of Zone for the convenient of tests.
+// It is a derived class of ZoneFinder for the convenience of tests.
// Its find() method emulates the common behavior of protocol compliant
-// zone classes, but simplifies some minor cases and also supports broken
+// ZoneFinder classes, but simplifies some minor cases and also supports broken
// behavior.
// For simplicity, most names are assumed to be "in zone"; there's only
// one zone cut at the point of name "delegation.example.com".
@@ -103,15 +103,16 @@ const char* const other_zone_rrs =
// will result in DNAME.
// This mock zone doesn't handle empty non terminal nodes (if we need to test
// such cases find() should have specialized code for it).
-class MockZone : public Zone {
+class MockZoneFinder : public ZoneFinder {
public:
- MockZone() :
+ MockZoneFinder() :
origin_(Name("example.com")),
delegation_name_("delegation.example.com"),
dname_name_("dname.example.com"),
has_SOA_(true),
has_apex_NS_(true),
- rrclass_(RRClass::IN())
+ rrclass_(RRClass::IN()),
+ include_rrsig_anyway_(false)
{
stringstream zone_stream;
zone_stream << soa_txt << zone_ns_txt << ns_addrs_txt <<
@@ -120,14 +121,14 @@ public:
other_zone_rrs;
masterLoad(zone_stream, origin_, rrclass_,
- boost::bind(&MockZone::loadRRset, this, _1));
+ boost::bind(&MockZoneFinder::loadRRset, this, _1));
}
- virtual const isc::dns::Name& getOrigin() const { return (origin_); }
- virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+ virtual isc::dns::Name getOrigin() const { return (origin_); }
+ virtual isc::dns::RRClass getClass() const { return (rrclass_); }
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
// If false is passed, it makes the zone broken as if it didn't have the
// SOA.
@@ -137,11 +138,18 @@ public:
// the apex NS.
void setApexNSFlag(bool on) { has_apex_NS_ = on; }
+ // Turn this on if you want it to return RRSIGs regardless of FIND_DNSSEC
+ void setIncludeRRSIGAnyway(bool on) { include_rrsig_anyway_ = on; }
+
+ Name findPreviousName(const Name&) const {
+ isc_throw(isc::NotImplemented, "Mock doesn't support previous name");
+ }
+
private:
typedef map<RRType, ConstRRsetPtr> RRsetStore;
typedef map<Name, RRsetStore> Domains;
Domains domains_;
- void loadRRset(ConstRRsetPtr rrset) {
+ void loadRRset(RRsetPtr rrset) {
domains_[rrset->getName()][rrset->getType()] = rrset;
if (rrset->getName() == delegation_name_ &&
rrset->getType() == RRType::NS()) {
@@ -149,6 +157,26 @@ private:
} else if (rrset->getName() == dname_name_ &&
rrset->getType() == RRType::DNAME()) {
dname_rrset_ = rrset;
+ // Add some signatures
+ } else if (rrset->getName() == Name("example.com.") &&
+ rrset->getType() == RRType::NS()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("NS 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
+ } else if (rrset->getType() == RRType::A()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("A 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
+ } else if (rrset->getType() == RRType::AAAA()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("AAAA 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
}
}
@@ -161,11 +189,12 @@ private:
ConstRRsetPtr delegation_rrset_;
ConstRRsetPtr dname_rrset_;
const RRClass rrclass_;
+ bool include_rrsig_anyway_;
};
-Zone::FindResult
-MockZone::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ZoneFinder::FindResult
+MockZoneFinder::find(const Name& name, const RRType& type,
+ RRsetList* target, const FindOptions options)
{
// Emulating a broken zone: mandatory apex RRs are missing if specifically
// configured so (which are rare cases).
@@ -195,7 +224,26 @@ MockZone::find(const Name& name, const RRType& type,
RRsetStore::const_iterator found_rrset =
found_domain->second.find(type);
if (found_rrset != found_domain->second.end()) {
- return (FindResult(SUCCESS, found_rrset->second));
+ ConstRRsetPtr rrset;
+ // Strip whatever signature there is in case DNSSEC is not required
+ // Just to make sure the Query asks for it when it is needed
+ if (options & ZoneFinder::FIND_DNSSEC ||
+ include_rrsig_anyway_ ||
+ !found_rrset->second->getRRsig()) {
+ rrset = found_rrset->second;
+ } else {
+ RRsetPtr noconst(new RRset(found_rrset->second->getName(),
+ found_rrset->second->getClass(),
+ found_rrset->second->getType(),
+ found_rrset->second->getTTL()));
+ for (RdataIteratorPtr
+ i(found_rrset->second->getRdataIterator());
+ !i->isLast(); i->next()) {
+ noconst->addRdata(i->getCurrent());
+ }
+ rrset = noconst;
+ }
+ return (FindResult(SUCCESS, rrset));
}
// If not found but we have a target, fill it with all RRsets here
@@ -233,11 +281,15 @@ protected:
response.setRcode(Rcode::NOERROR());
response.setOpcode(Opcode::QUERY());
// create and add a matching zone.
- mock_zone = new MockZone();
- memory_datasrc.addZone(ZonePtr(mock_zone));
+ mock_finder = new MockZoneFinder();
+ memory_client.addZone(ZoneFinderPtr(mock_finder));
}
- MockZone* mock_zone;
- MemoryDataSrc memory_datasrc;
+ MockZoneFinder* mock_finder;
+ // We use InMemoryClient here. We could have some kind of mock client
+ // here, but historically, the Query supported only InMemoryClient
+ // (originally named MemoryDataSrc) and was tested with it, so we keep
+ // it like this for now.
+ InMemoryClient memory_client;
const Name qname;
const RRClass qclass;
const RRType qtype;
@@ -286,24 +338,76 @@ responseCheck(Message& response, const isc::dns::Rcode& rcode,
TEST_F(QueryTest, noZone) {
// There's no zone in the memory datasource. So the response should have
// REFUSED.
- MemoryDataSrc empty_memory_datasrc;
- Query nozone_query(empty_memory_datasrc, qname, qtype, response);
+ InMemoryClient empty_memory_client;
+ Query nozone_query(empty_memory_client, qname, qtype, response);
EXPECT_NO_THROW(nozone_query.process());
EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
}
TEST_F(QueryTest, exactMatch) {
- Query query(memory_datasrc, qname, qtype, response);
+ Query query(memory_client, qname, qtype, response);
EXPECT_NO_THROW(query.process());
// find match rrset
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
www_a_txt, zone_ns_txt, ns_addrs_txt);
}
+TEST_F(QueryTest, exactMatchIgnoreSIG) {
+ // Check that we do not include the RRSIG when not requested even when
+ // we receive it from the data source.
+ mock_finder->setIncludeRRSIGAnyway(true);
+ Query query(memory_client, qname, qtype, response);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
+ www_a_txt, zone_ns_txt, ns_addrs_txt);
+}
+
+TEST_F(QueryTest, dnssecPositive) {
+ // Just like exactMatch, but the signatures should be included as well
+ Query query(memory_client, qname, qtype, response, true);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ // We can't let responseCheck to check the additional section as well,
+ // it gets confused by the two RRs for glue.delegation.../RRSIG due
+ // to its design and fixing it would be hard. Therefore we simply
+ // check manually this one time.
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 4, 6,
+ (www_a_txt + std::string("www.example.com. 3600 IN RRSIG "
+ "A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. "
+ "FAKEFAKEFAKE\n")).c_str(),
+ (zone_ns_txt + std::string("example.com. 3600 IN RRSIG NS 5 "
+ "3 3600 20000101000000 "
+ "20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE\n")).
+ c_str(), NULL);
+ RRsetIterator iterator(response.beginSection(Message::SECTION_ADDITIONAL));
+ const char* additional[] = {
+ "glue.delegation.example.com. 3600 IN A 192.0.2.153\n",
+ "glue.delegation.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n",
+ "glue.delegation.example.com. 3600 IN RRSIG AAAA 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "noglue.example.com. 3600 IN A 192.0.2.53\n",
+ "noglue.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ NULL
+ };
+ for (const char** rr(additional); *rr != NULL; ++ rr) {
+ ASSERT_FALSE(iterator ==
+ response.endSection(Message::SECTION_ADDITIONAL));
+ EXPECT_EQ(*rr, (*iterator)->toText());
+ iterator ++;
+ }
+ EXPECT_TRUE(iterator == response.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(QueryTest, exactAddrMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+ EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
@@ -315,7 +419,7 @@ TEST_F(QueryTest, exactAddrMatch) {
TEST_F(QueryTest, apexNSMatch) {
// find match rrset, omit authority data which has already been provided
// in the answer section from the authority section.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"), RRType::NS(),
+ EXPECT_NO_THROW(Query(memory_client, Name("example.com"), RRType::NS(),
response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 0, 3,
@@ -326,7 +430,7 @@ TEST_F(QueryTest, apexNSMatch) {
TEST_F(QueryTest, exactAnyMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
@@ -339,18 +443,18 @@ TEST_F(QueryTest, exactAnyMatch) {
TEST_F(QueryTest, apexAnyMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 4, 0, 3,
"example.com. 3600 IN SOA . . 0 0 0 0 0\n"
"example.com. 3600 IN NS glue.delegation.example.com.\n"
"example.com. 3600 IN NS noglue.example.com.\n"
"example.com. 3600 IN NS example.net.\n",
- NULL, ns_addrs_txt, mock_zone->getOrigin());
+ NULL, ns_addrs_txt, mock_finder->getOrigin());
}
TEST_F(QueryTest, mxANYMatch) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("mx.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("mx.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
mx_txt, zone_ns_txt,
@@ -358,17 +462,17 @@ TEST_F(QueryTest, mxANYMatch) {
}
TEST_F(QueryTest, glueANYMatch) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
NULL, delegation_txt, ns_addrs_txt);
}
TEST_F(QueryTest, nodomainANY) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
// This tests that when we need to look up Zone's apex NS records for
@@ -376,15 +480,15 @@ TEST_F(QueryTest, nodomainANY) {
// throw in that case.
TEST_F(QueryTest, noApexNS) {
// Disable apex NS record
- mock_zone->setApexNSFlag(false);
+ mock_finder->setApexNSFlag(false);
- EXPECT_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+ EXPECT_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
response).process(), Query::NoApexNS);
// We don't look into the response, as it threw
}
TEST_F(QueryTest, delegation) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
qtype, response).process());
responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
@@ -392,18 +496,18 @@ TEST_F(QueryTest, delegation) {
}
TEST_F(QueryTest, nxdomain) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"), qtype,
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
response).process());
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
TEST_F(QueryTest, nxrrset) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("www.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("www.example.com"),
RRType::TXT(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
/*
@@ -412,22 +516,22 @@ TEST_F(QueryTest, nxrrset) {
*/
TEST_F(QueryTest, noSOA) {
// disable zone's SOA RR.
- mock_zone->setSOAFlag(false);
+ mock_finder->setSOAFlag(false);
// The NX Domain
- EXPECT_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+ EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"),
qtype, response).process(), Query::NoSOA);
// Of course, we don't look into the response, as it threw
// NXRRSET
- EXPECT_THROW(Query(memory_datasrc, Name("nxrrset.example.com"),
+ EXPECT_THROW(Query(memory_client, Name("nxrrset.example.com"),
qtype, response).process(), Query::NoSOA);
}
TEST_F(QueryTest, noMatchZone) {
// there's a zone in the memory datasource but it doesn't match the qname.
// should result in REFUSED.
- Query(memory_datasrc, Name("example.org"), qtype, response).process();
+ Query(memory_client, Name("example.org"), qtype, response).process();
EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
}
@@ -438,7 +542,7 @@ TEST_F(QueryTest, noMatchZone) {
* A record, other to unknown out of zone one.
*/
TEST_F(QueryTest, MX) {
- Query(memory_datasrc, Name("mx.example.com"), RRType::MX(),
+ Query(memory_client, Name("mx.example.com"), RRType::MX(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
@@ -452,7 +556,7 @@ TEST_F(QueryTest, MX) {
* This should not trigger the additional processing for the exchange.
*/
TEST_F(QueryTest, MXAlias) {
- Query(memory_datasrc, Name("cnamemx.example.com"), RRType::MX(),
+ Query(memory_client, Name("cnamemx.example.com"), RRType::MX(),
response).process();
// there shouldn't be any additional RRs for the exchanges (we have 3
@@ -472,7 +576,7 @@ TEST_F(QueryTest, MXAlias) {
* returned.
*/
TEST_F(QueryTest, CNAME) {
- Query(memory_datasrc, Name("cname.example.com"), RRType::A(),
+ Query(memory_client, Name("cname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -482,7 +586,7 @@ TEST_F(QueryTest, CNAME) {
TEST_F(QueryTest, explicitCNAME) {
// same owner name as the CNAME test but explicitly query for CNAME RR.
// expect the same response as we don't provide a full chain yet.
- Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -494,7 +598,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
// note: with chaining, what should be expected is not trivial:
// BIND 9 returns the CNAME in answer and SOA in authority, no additional.
// NSD returns the CNAME, NS in authority, A/AAAA for NS in additional.
- Query(memory_datasrc, Name("cname.example.com"), RRType::TXT(),
+ Query(memory_client, Name("cname.example.com"), RRType::TXT(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -503,7 +607,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
TEST_F(QueryTest, explicitCNAME_NX_RRSET) {
// same owner name as the NXRRSET test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -517,7 +621,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
// RCODE being NXDOMAIN.
// NSD returns the CNAME, NS in authority, A/AAAA for NS in additional,
// RCODE being NOERROR.
- Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::A(),
+ Query(memory_client, Name("cnamenxdom.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -526,7 +630,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
TEST_F(QueryTest, explicitCNAME_NX_DOMAIN) {
// same owner name as the NXDOMAIN test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cnamenxdom.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -542,7 +646,7 @@ TEST_F(QueryTest, CNAME_OUT) {
* Then the same test should be done with .org included there and
* see what it does (depends on what we want to do)
*/
- Query(memory_datasrc, Name("cnameout.example.com"), RRType::A(),
+ Query(memory_client, Name("cnameout.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -551,7 +655,7 @@ TEST_F(QueryTest, CNAME_OUT) {
TEST_F(QueryTest, explicitCNAME_OUT) {
// same owner name as the OUT test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cnameout.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cnameout.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -567,7 +671,7 @@ TEST_F(QueryTest, explicitCNAME_OUT) {
* pointing to NXRRSET and NXDOMAIN cases (similarly as with CNAME).
*/
TEST_F(QueryTest, DNAME) {
- Query(memory_datasrc, Name("www.dname.example.com"), RRType::A(),
+ Query(memory_client, Name("www.dname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -583,7 +687,7 @@ TEST_F(QueryTest, DNAME) {
* DNAME.
*/
TEST_F(QueryTest, DNAME_ANY) {
- Query(memory_datasrc, Name("www.dname.example.com"), RRType::ANY(),
+ Query(memory_client, Name("www.dname.example.com"), RRType::ANY(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -592,7 +696,7 @@ TEST_F(QueryTest, DNAME_ANY) {
// Test when we ask for DNAME explicitly, it does no synthesizing.
TEST_F(QueryTest, explicitDNAME) {
- Query(memory_datasrc, Name("dname.example.com"), RRType::DNAME(),
+ Query(memory_client, Name("dname.example.com"), RRType::DNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -604,7 +708,7 @@ TEST_F(QueryTest, explicitDNAME) {
* the CNAME, it should return the RRset.
*/
TEST_F(QueryTest, DNAME_A) {
- Query(memory_datasrc, Name("dname.example.com"), RRType::A(),
+ Query(memory_client, Name("dname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -616,11 +720,11 @@ TEST_F(QueryTest, DNAME_A) {
* It should not synthesize the CNAME.
*/
TEST_F(QueryTest, DNAME_NX_RRSET) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("dname.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("dname.example.com"),
RRType::TXT(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
/*
@@ -636,7 +740,7 @@ TEST_F(QueryTest, LongDNAME) {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"dname.example.com.");
- EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+ EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
response).process());
responseCheck(response, Rcode::YXDOMAIN(), AA_FLAG, 1, 0, 0,
@@ -655,7 +759,7 @@ TEST_F(QueryTest, MaxLenDNAME) {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"dname.example.com.");
- EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+ EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
response).process());
// Check the answer is OK
diff --git a/src/bin/auth/tests/run_unittests.cc b/src/bin/auth/tests/run_unittests.cc
index 6ae848d..d3bbab7 100644
--- a/src/bin/auth/tests/run_unittests.cc
+++ b/src/bin/auth/tests/run_unittests.cc
@@ -13,6 +13,8 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
#include <dns/tests/unittest_util.h>
@@ -21,6 +23,7 @@ main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR);
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
+ isc::log::initLogger();
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 062b70d..9a3dded 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -69,13 +69,12 @@ private:
};
protected:
- AuthCountersTest() : verbose_mode_(false), counters(verbose_mode_) {
+ AuthCountersTest() : counters() {
counters.setStatisticsSession(&statistics_session_);
}
~AuthCountersTest() {
}
MockSession statistics_session_;
- bool verbose_mode_;
AuthCounters counters;
};
diff --git a/src/bin/auth/tests/testdata/Makefile.am b/src/bin/auth/tests/testdata/Makefile.am
index f6f1f27..c86722f 100644
--- a/src/bin/auth/tests/testdata/Makefile.am
+++ b/src/bin/auth/tests/testdata/Makefile.am
@@ -23,4 +23,4 @@ EXTRA_DIST += example.com
EXTRA_DIST += example.sqlite3
.spec.wire:
- $(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 254875f..5ec0c9f 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,16 +1,23 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10.pyc
+CLEANFILES = bind10 bind10_src.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
pkglibexecdir = $(libexecdir)/@PACKAGE@
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+noinst_SCRIPTS = run_bind10.sh
+
bind10dir = $(pkgdatadir)
bind10_DATA = bob.spec
EXTRA_DIST = bob.spec
man_MANS = bind10.8
-EXTRA_DIST += $(man_MANS) bind10.xml
+EXTRA_DIST += $(man_MANS) bind10.xml bind10_messages.mes
if ENABLE_MAN
@@ -19,11 +26,20 @@ bind10.8: bind10.xml
endif
+$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10.py
+bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10.py >$@
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
chmod a+x $@
pytest:
$(SHELL) tests/bind10_test
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index d5ab905..1af4f14 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 31, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "March 31, 2011" "BIND10" "BIND10"
+.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -107,6 +107,18 @@ Display more about what is going on for
\fBbind10\fR
and its child processes\&.
.RE
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+bind10\&.boot_time
+.RS 4
+The date and time that the
+\fBbind10\fR
+process started\&. This is represented in ISO 8601 format\&.
+.RE
.SH "SEE ALSO"
.PP
diff --git a/src/bin/bind10/bind10.py.in b/src/bin/bind10/bind10.py.in
deleted file mode 100755
index 648d085..0000000
--- a/src/bin/bind10/bind10.py.in
+++ /dev/null
@@ -1,1039 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
- PREFIX = "@prefix@"
- DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-
-import isc.cc
-import isc.util.process
-import isc.net.parse
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for bind10.boottime of stats module
-_BASETIME = time.gmtime()
-
-class RestartSchedule:
- """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
- * If a process has been running for >=10 seconds, we restart it
- right away.
- * If a process was running for <10 seconds, we wait until 10 seconds
- after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
- def __init__(self, restart_frequency=10.0):
- self.restart_frequency = restart_frequency
- self.run_start_time = None
- self.run_stop_time = None
- self.restart_time = None
-
- def set_run_start_time(self, when=None):
- if when is None:
- when = time.time()
- self.run_start_time = when
- sigma = self.restart_frequency * 0.05
- self.restart_time = when + random.normalvariate(self.restart_frequency,
- sigma)
-
- def set_run_stop_time(self, when=None):
- """We don't actually do anything with stop time now, but it
- might be useful for future algorithms."""
- if when is None:
- when = time.time()
- self.run_stop_time = when
-
- def get_restart_time(self, when=None):
- if when is None:
- when = time.time()
- return max(when, self.restart_time)
-
-class ProcessInfoError(Exception): pass
-
-class ProcessInfo:
- """Information about a process"""
-
- dev_null = open(os.devnull, "w")
-
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False, uid=None, username=None):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.restart_schedule = RestartSchedule()
- self.uid = uid
- self.username = username
- self.process = None
- self.pid = None
-
- def _preexec_work(self):
- """Function used before running a program that needs to run as a
- different user."""
- # First, put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everything down by
- # other means).
- os.setpgrp()
- # Second, set the user ID if one has been specified
- if self.uid is not None:
- try:
- posix.setuid(self.uid)
- except OSError as e:
- if e.errno == errno.EPERM:
- # if we failed to change user due to permission report that
- raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
- else:
- # otherwise simply re-raise whatever error we found
- raise
-
- def _spawn(self):
- if self.dev_null_stdout:
- spawn_stdout = self.dev_null
- else:
- spawn_stdout = None
- if self.dev_null_stderr:
- spawn_stderr = self.dev_null
- else:
- spawn_stderr = None
- # Environment variables for the child process will be a copy of those
- # of the boss process with any additional specific variables given
- # on construction (self.env).
- spawn_env = os.environ
- spawn_env.update(self.env)
- if 'B10_FROM_SOURCE' not in os.environ:
- spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
- self.process = subprocess.Popen(self.args,
- stdin=subprocess.PIPE,
- stdout=spawn_stdout,
- stderr=spawn_stderr,
- close_fds=True,
- env=spawn_env,
- preexec_fn=self._preexec_work)
- self.pid = self.process.pid
- self.restart_schedule.set_run_start_time()
-
- # spawn() and respawn() are the same for now, but in the future they
- # may have different functionality
- def spawn(self):
- self._spawn()
-
- def respawn(self):
- self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class BoB:
- """Boss of BIND class."""
-
- def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, brittle=False):
- """
- Initialize the Boss of BIND. This is a singleton (only one can run).
-
- The msgq_socket_file specifies the UNIX domain socket file that the
- msgq process listens on. If verbose is True, then the boss reports
- what it is doing.
-
- Data path and config filename are passed through to config manager
- (if provided) and specify the config file to be used.
-
- The cmdctl_port is passed to cmdctl and specifies on which port it
- should listen.
- """
- self.cc_session = None
- self.ccs = None
- self.cfg_start_auth = True
- self.cfg_start_resolver = False
- self.started_auth_family = False
- self.started_resolver_family = False
- self.curproc = None
- self.dead_processes = {}
- self.msgq_socket_file = msgq_socket_file
- self.nocache = nocache
- self.processes = {}
- self.expected_shutdowns = {}
- self.runnable = False
- self.uid = setuid
- self.username = username
- self.verbose = verbose
- self.data_path = data_path
- self.config_filename = config_filename
- self.cmdctl_port = cmdctl_port
- self.brittle = brittle
-
- def config_handler(self, new_config):
- # If this is initial update, don't do anything now, leave it to startup
- if not self.runnable:
- return
- # Now we declare few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- sys.stderr.write("[bind10] Starting " + name + " as " +
- "a user, not root. This might fail.\n")
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.start_resolver(self.c_channel_env)
- self.started_resolver_family = True
- def resolver_off():
- self.stop_resolver()
- self.started_resolver_family = False
- def auth_on():
- self.start_auth(self.c_channel_env)
- self.start_xfrout(self.c_channel_env)
- self.start_xfrin(self.c_channel_env)
- self.start_zonemgr(self.c_channel_env)
- self.started_auth_family = True
- def auth_off():
- self.stop_zonemgr()
- self.stop_xfrin()
- self.stop_xfrout()
- self.stop_auth()
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
- if self.verbose:
- sys.stdout.write("[bind10] Handling new configuration: " +
- str(new_config) + "\n")
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
-
- def get_processes(self):
- pids = list(self.processes.keys())
- pids.sort()
- process_list = [ ]
- for pid in pids:
- process_list.append([pid, self.processes[pid].name])
- return process_list
-
- def command_handler(self, command, args):
- if self.verbose:
- sys.stdout.write("[bind10] Boss got command: " + str(command) + "\n")
- answer = isc.config.ccsession.create_answer(1, "command not implemented")
- if type(command) != str:
- answer = isc.config.ccsession.create_answer(1, "bad command")
- else:
- if command == "shutdown":
- self.runnable = False
- answer = isc.config.ccsession.create_answer(0)
- elif command == "sendstats":
- # send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }})
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- self.cc_session.group_recvmsg(True, seq)
- answer = isc.config.ccsession.create_answer(0)
- elif command == "ping":
- answer = isc.config.ccsession.create_answer(0, "pong")
- elif command == "show_processes":
- answer = isc.config.ccsession. \
- create_answer(0, self.get_processes())
- else:
- answer = isc.config.ccsession.create_answer(1,
- "Unknown command")
- return answer
-
- def kill_started_processes(self):
- """
- Called as part of the exception handling when a process fails to
- start, this runs through the list of started processes, killing
- each one. It then clears that list.
- """
- if self.verbose:
- sys.stdout.write("[bind10] killing started processes:\n")
-
- for pid in self.processes:
- if self.verbose:
- sys.stdout.write("[bind10] - %s\n" % self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
-
- def read_bind10_config(self):
- """
- Reads the parameters associated with the BoB module itself.
-
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
- """
- if self.verbose:
- sys.stdout.write("[bind10] Reading Boss configuration:\n")
-
- config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- if self.verbose:
- sys.stdout.write("[bind10] - start_auth: %s\n" %
- str(self.cfg_start_auth))
- sys.stdout.write("[bind10] - start_resolver: %s\n" %
- str(self.cfg_start_resolver))
-
- def log_starting(self, process, port = None, address = None):
- """
- A convenience function to output a "Starting xxx" message if the
- verbose option is set. Putting this into a separate method ensures
- that the output form is consistent across all processes.
-
- The process name (passed as the first argument) is put into
- self.curproc, and is used to indicate which process failed to
- start if there is an error (and is used in the "Started" message
- on success). The optional port and address information are
- appended to the message (if present).
- """
- self.curproc = process
- if self.verbose:
- sys.stdout.write("[bind10] Starting %s" % self.curproc)
- if port is not None:
- sys.stdout.write(" on port %d" % port)
- if address is not None:
- sys.stdout.write(" (address %s)" % str(address))
- sys.stdout.write("\n")
-
- def log_started(self, pid = None):
- """
- A convenience function to output a 'Started xxxx (PID yyyy)'
- message. As with starting_message(), this ensures a consistent
- format.
- """
- if self.verbose:
- sys.stdout.write("[bind10] Started %s" % self.curproc)
- if pid is not None:
- sys.stdout.write(" (PID %d)" % pid)
- sys.stdout.write("\n")
-
- # The next few methods start the individual processes of BIND-10. They
- # are called via start_all_processes(). If any fail, an exception is
- # raised which is caught by the caller of start_all_processes(); this kills
- # processes started up to that point before terminating the program.
-
- def start_msgq(self, c_channel_env):
- """
- Start the message queue and connect to the command channel.
- """
- self.log_starting("b10-msgq")
- c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
- True, not self.verbose, uid=self.uid,
- username=self.username)
- c_channel.spawn()
- self.processes[c_channel.pid] = c_channel
- self.log_started(c_channel.pid)
-
- # Now connect to the c-channel
- cc_connect_start = time.time()
- while self.cc_session is None:
- # if we have been trying for "a while" give up
- if (time.time() - cc_connect_start) > 5:
- raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- except isc.cc.session.SessionError:
- time.sleep(0.1)
-
- def start_cfgmgr(self, c_channel_env):
- """
- Starts the configuration manager process
- """
- self.log_starting("b10-cfgmgr")
- args = ["b10-cfgmgr"]
- if self.data_path is not None:
- args.append("--data-path=" + self.data_path)
- if self.config_filename is not None:
- args.append("--config-filename=" + self.config_filename)
- bind_cfgd = ProcessInfo("b10-cfgmgr", args,
- c_channel_env, uid=self.uid,
- username=self.username)
- bind_cfgd.spawn()
- self.processes[bind_cfgd.pid] = bind_cfgd
- self.log_started(bind_cfgd.pid)
-
- # sleep until b10-cfgmgr is fully up and running, this is a good place
- # to have a (short) timeout on synchronized groupsend/receive
- # TODO: replace the sleep by a listen for ConfigManager started
- # message
- time.sleep(1)
-
- def start_ccsession(self, c_channel_env):
- """
- Start the CC Session
-
- The argument c_channel_env is unused but is supplied to keep the
- argument list the same for all start_xxx methods.
- """
- self.log_starting("ccsession")
- self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler, self.command_handler)
- self.ccs.start()
- self.log_started()
-
- # A couple of utility methods for starting processes...
-
- def start_process(self, name, args, c_channel_env, port=None, address=None):
- """
- Given a set of command arguments, start the process and output
- appropriate log messages. If the start is successful, the process
- is added to the list of started processes.
-
- The port and address arguments are for log messages only.
- """
- self.log_starting(name, port, address)
- newproc = ProcessInfo(name, args, c_channel_env)
- newproc.spawn()
- self.processes[newproc.pid] = newproc
- self.log_started(newproc.pid)
-
- def start_simple(self, name, c_channel_env, port=None, address=None):
- """
- Most of the BIND-10 processes are started with the command:
-
- <process-name> [-v]
-
- ... where -v is appended if verbose is enabled. This method
- generates the arguments from the name and starts the process.
-
- The port and address arguments are for log messages only.
- """
- # Set up the command arguments.
- args = [name]
- if self.verbose:
- args += ['-v']
-
- # ... and start the process
- self.start_process(name, args, c_channel_env, port, address)
-
- # The next few methods start up the rest of the BIND-10 processes.
- # Although many of these methods are little more than a call to
- # start_simple, they are retained (a) for testing reasons and (b) as a place
- # where modifications can be made if the process start-up sequence changes
- # for a given process.
-
- def start_auth(self, c_channel_env):
- """
- Start the Authoritative server
- """
- authargs = ['b10-auth']
- if self.nocache:
- authargs += ['-n']
- if self.uid:
- authargs += ['-u', str(self.uid)]
- if self.verbose:
- authargs += ['-v']
-
- # ... and start
- self.start_process("b10-auth", authargs, c_channel_env)
-
- def start_resolver(self, c_channel_env):
- """
- Start the Resolver. At present, all these arguments and switches
- are pure speculation. As with the auth daemon, they should be
- read from the configuration database.
- """
- self.curproc = "b10-resolver"
- # XXX: this must be read from the configuration manager in the future
- resargs = ['b10-resolver']
- if self.uid:
- resargs += ['-u', str(self.uid)]
- if self.verbose:
- resargs += ['-v']
-
- # ... and start
- self.start_process("b10-resolver", resargs, c_channel_env)
-
- def start_xfrout(self, c_channel_env):
- self.start_simple("b10-xfrout", c_channel_env)
-
- def start_xfrin(self, c_channel_env):
- self.start_simple("b10-xfrin", c_channel_env)
-
- def start_zonemgr(self, c_channel_env):
- self.start_simple("b10-zonemgr", c_channel_env)
-
- def start_stats(self, c_channel_env):
- self.start_simple("b10-stats", c_channel_env)
-
- def start_stats_httpd(self, c_channel_env):
- self.start_simple("b10-stats-httpd", c_channel_env)
-
- def start_cmdctl(self, c_channel_env):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
-
- def start_all_processes(self):
- """
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
- """
- c_channel_env = self.c_channel_env
- self.start_msgq(c_channel_env)
- self.start_cfgmgr(c_channel_env)
- self.start_ccsession(c_channel_env)
-
- # Extract the parameters associated with Bob. This can only be
- # done after the CC Session is started.
- self.read_bind10_config()
-
- # Continue starting the processes. The authoritative server (if
- # selected):
- if self.cfg_start_auth:
- self.start_auth(c_channel_env)
-
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- self.start_resolver(c_channel_env)
- self.started_resolver_family = True
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- self.start_xfrout(c_channel_env)
- self.start_xfrin(c_channel_env)
- self.start_zonemgr(c_channel_env)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- self.start_stats(c_channel_env)
- self.start_stats_httpd(c_channel_env)
- self.start_cmdctl(c_channel_env)
-
- def startup(self):
- """
- Start the BoB instance.
-
- Returns None if successful, otherwise a string describing the
- problem.
- """
- # Try to connect to the c-channel daemon, to see if it is already
- # running
- c_channel_env = {}
- if self.msgq_socket_file is not None:
- c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
- if self.verbose:
- sys.stdout.write("[bind10] Checking for already running b10-msgq\n")
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- return "b10-msgq already running, or socket file not cleaned , cannot start"
- except isc.cc.session.SessionError:
- # this is the case we want, where the msgq is not running
- pass
-
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
- try:
- self.c_channel_env = c_channel_env
- self.start_all_processes()
- except Exception as e:
- self.kill_started_processes()
- return "Unable to start " + self.curproc + ": " + str(e)
-
- # Started successfully
- self.runnable = True
- return None
-
- def stop_all_processes(self):
- """Stop all processes."""
- cmd = { "command": ['shutdown']}
-
- self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
- self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
- self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
- self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
- self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
- self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
- self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
- self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
- self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
-
- def stop_process(self, process, recipient):
- """
- Stop the given process, friendly-like. The process is the name it has
- (in logs, etc), the recipient is the address on msgq.
- """
- if self.verbose:
- sys.stdout.write("[bind10] Asking %s to terminate\n" % process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
- self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
- recipient)
-
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
-
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
-
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
-
- def shutdown(self):
- """Stop the BoB instance."""
- if self.verbose:
- sys.stdout.write("[bind10] Stopping the server.\n")
- # first try using the BIND 10 request to stop
- try:
- self.stop_all_processes()
- except:
- pass
- # XXX: some delay probably useful... how much is uncertain
- # I have changed the delay from 0.5 to 1, but sometime it's
- # still not enough.
- time.sleep(1)
- self.reap_children()
- # next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- if self.verbose:
- sys.stdout.write("[bind10] Sending SIGTERM to %s (PID %d).\n" %
- (proc_info.name, proc_info.pid))
- try:
- proc_info.process.terminate()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- # finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- if self.verbose:
- sys.stdout.write("[bind10] Sending SIGKILL to %s (PID %d).\n" %
- (proc_info.name, proc_info.pid))
- try:
- proc_info.process.kill()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- if self.verbose:
- sys.stdout.write("[bind10] All processes ended, server done.\n")
-
- def _get_process_exit_status(self):
- return os.waitpid(-1, os.WNOHANG)
-
- def reap_children(self):
- """Check to see if any of our child processes have exited,
- and note this for later handling.
- """
- while True:
- try:
- (pid, exit_status) = self._get_process_exit_status()
- except OSError as o:
- if o.errno == errno.ECHILD: break
- # XXX: should be impossible to get any other error here
- raise
- if pid == 0: break
- if pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- sys.stdout.write(
- "[bind10] Process %s (PID %d) died: exit status not available" %
- (proc_info.name, proc_info.pid))
- else:
- sys.stdout.write(
- "[bind10] Process %s (PID %d) terminated, exit status = %d\n" %
- (proc_info.name, proc_info.pid, exit_status))
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- sys.stdout.write(
- "[bind10] The b10-msgq process died, shutting down.\n")
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
- else:
- sys.stdout.write("[bind10] Unknown child pid %d exited.\n" % pid)
-
- def restart_processes(self):
- """
- Restart any dead processes:
-
- * Returns the time when the next process is ready to be restarted.
- * If the server is shutting down, returns 0.
- * If there are no processes, returns None.
-
- The values returned can be safely passed into select() as the
- timeout value.
- """
- next_restart = None
- # if we're shutting down, then don't restart
- if not self.runnable:
- return 0
- # otherwise look through each dead process and try to restart
- still_dead = {}
- now = time.time()
- for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
- restart_time = proc_info.restart_schedule.get_restart_time(now)
- if restart_time > now:
- if (next_restart is None) or (next_restart > restart_time):
- next_restart = restart_time
- still_dead[proc_info.pid] = proc_info
- else:
- if self.verbose:
- sys.stdout.write("[bind10] Resurrecting dead %s process...\n" %
- proc_info.name)
- try:
- proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
- sys.stdout.write("[bind10] Resurrected %s (PID %d)\n" %
- (proc_info.name, proc_info.pid))
- except:
- still_dead[proc_info.pid] = proc_info
- # remember any processes that refuse to be resurrected
- self.dead_processes = still_dead
- # return the time when the next process is ready to be restarted
- return next_restart
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
- """A child process has died (SIGCHLD received)."""
- # don't do anything...
- # the Python signal handler has been set up to write
- # down a pipe, waking up our select() bit
- pass
-
-def get_signame(signal_number):
- """Return the symbolic name for a signal."""
- for sig in dir(signal):
- if sig.startswith("SIG") and sig[3].isalnum():
- if getattr(signal, sig) == signal_number:
- return sig
- return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
- """We need to exit (SIGINT or SIGTERM received)."""
- global options
- global boss_of_bind
- if options.verbose:
- sys.stdout.write("[bind10] Received %s.\n" % get_signame(signal_number))
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
- """Function that renames the process if it is requested by a option."""
- isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
- """
- Function for parsing command line arguments. Returns the
- options object from OptionParser.
- """
- parser = Parser(version=VERSION)
- parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
- type="string", default=None,
- help="UNIX domain socket file the b10-msgq daemon will use")
- parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
- default=False, help="disable hot-spot cache in authoritative DNS server")
- parser.add_option("-u", "--user", dest="user", type="string", default=None,
- help="Change user after startup (must run as root)")
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
- parser.add_option("--pretty-name", type="string", action="callback",
- callback=process_rename,
- help="Set the process name (displayed in ps, top, ...)")
- parser.add_option("-c", "--config-file", action="store",
- dest="config_file", default=None,
- help="Configuration database filename")
- parser.add_option("-p", "--data-path", dest="data_path",
- help="Directory to search for configuration files",
- default=None)
- parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
- default=None, help="Port of command control")
- parser.add_option("--pid-file", dest="pid_file", type="string",
- default=None,
- help="file to dump the PID of the BIND 10 process")
- parser.add_option("--brittle", dest="brittle", action="store_true",
- help="debugging flag: exit if any component dies")
-
- (options, args) = parser.parse_args(args)
-
- if options.cmdctl_port is not None:
- try:
- isc.net.parse.port_parse(options.cmdctl_port)
- except ValueError as e:
- parser.error(e)
-
- if args:
- parser.print_help()
- sys.exit(1)
-
- return options
-
-def dump_pid(pid_file):
- """
- Dump the PID of the current process to the specified file. If the given
- file is None this function does nothing. If the file already exists,
- the existing content will be removed. If a system error happens in
- creating or writing to the file, the corresponding exception will be
- propagated to the caller.
- """
- if pid_file is None:
- return
- f = open(pid_file, "w")
- f.write('%d\n' % os.getpid())
- f.close()
-
-def unlink_pid_file(pid_file):
- """
- Remove the given file, which is basically expected to be the PID file
-    created by dump_pid(). The specified file may or may not exist; if it
- doesn't this function does nothing. Other system level errors in removing
- the file will be propagated as the corresponding exception.
- """
- if pid_file is None:
- return
- try:
- os.unlink(pid_file)
- except OSError as error:
- if error.errno is not errno.ENOENT:
- raise
-
-
-def main():
- global options
- global boss_of_bind
- # Enforce line buffering on stdout, even when not a TTY
- sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
- options = parse_args()
-
- # Check user ID.
- setuid = None
- username = None
- if options.user:
- # Try getting information about the user, assuming UID passed.
- try:
- pw_ent = pwd.getpwuid(int(options.user))
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except ValueError:
- pass
- except KeyError:
- pass
-
- # Next try getting information about the user, assuming user name
- # passed.
- # If the information is both a valid user name and user number, we
- # prefer the name because we try it second. A minor point, hopefully.
- try:
- pw_ent = pwd.getpwnam(options.user)
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except KeyError:
- pass
-
- if setuid is None:
- sys.stderr.write("bind10: invalid user: '%s'\n" % options.user)
- sys.exit(1)
-
- # Announce startup.
- if options.verbose:
- sys.stdout.write("%s\n" % VERSION)
-
- # Create wakeup pipe for signal handlers
- wakeup_pipe = os.pipe()
- signal.set_wakeup_fd(wakeup_pipe[1])
-
- # Set signal handlers for catching child termination, as well
- # as our own demise.
- signal.signal(signal.SIGCHLD, reaper)
- signal.siginterrupt(signal.SIGCHLD, False)
- signal.signal(signal.SIGINT, fatal_signal)
- signal.signal(signal.SIGTERM, fatal_signal)
-
- # Block SIGPIPE, as we don't want it to end this process
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port, options.brittle)
- startup_result = boss_of_bind.startup()
- if startup_result:
- sys.stderr.write("[bind10] Error on startup: %s\n" % startup_result)
- sys.exit(1)
- sys.stdout.write("[bind10] BIND 10 started\n")
- dump_pid(options.pid_file)
-
- # In our main loop, we check for dead processes or messages
- # on the c-channel.
- wakeup_fd = wakeup_pipe[0]
- ccs_fd = boss_of_bind.ccs.get_socket().fileno()
- while boss_of_bind.runnable:
- # clean up any processes that exited
- boss_of_bind.reap_children()
- next_restart = boss_of_bind.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- sys.stderr.write("[bind10] Error with select(); %s\n" % err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- boss_of_bind.ccs.check_command()
- except isc.cc.session.ProtocolError:
- if options.verbose:
- sys.stderr.write("[bind10] msgq channel disappeared.\n")
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- sys.stdout.write("[bind10] BIND 10 exiting\n");
- unlink_pid_file(options.pid_file)
- sys.exit(0)
-
-if __name__ == "__main__":
- main()
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..b101ba8 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 31, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -217,6 +217,30 @@ The default is the basename of ARG 0.
<!--
TODO: configuration section
-->
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>bind10.boot_time</term>
+ <listitem><para>
+ The date and time that the <command>bind10</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
<!--
<refsect1>
<title>FILES</title>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
new file mode 100644
index 0000000..4bac069
--- /dev/null
+++ b/src/bin/bind10/bind10_messages.mes
@@ -0,0 +1,200 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the bind10 messages python module.
+
+% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+
+% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+
+% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
+This message shows whether or not the resolver should be
+started according to the configuration.
+
+% BIND10_INVALID_USER invalid user: %1
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
+% BIND10_KILL_PROCESS killing process %1
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+
+% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+
+% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+
+% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+
+% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+
+% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+
+% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+
+% BIND10_RECEIVED_COMMAND received command: %1
+The boss module received a command and shall now process it. The command
+is printed.
+
+% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+
+% BIND10_RECEIVED_SIGNAL received signal %1
+The boss module received the given signal.
+
+% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
+The given process has been restarted successfully, and is now running
+with the given process id.
+
+% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
+The given process has ended unexpectedly, and is now restarted.
+
+% BIND10_SELECT_ERROR error in select() call: %1
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+
+% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
+The boss module is sending a SIGKILL signal to the given process.
+
+% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
+The boss module is sending a SIGTERM signal to the given process.
+
+% BIND10_SHUTDOWN stopping the server
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+
+% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
+All child processes have been stopped, and the boss process will now
+stop itself.
+
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+
+% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The boss module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+sigkill. This should not happen usually.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The boss module sends a request to terminate to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The boss forwards a request for a socket to the socket creator.
+
+% BIND10_STARTED_PROCESS started %1
+The given process has successfully been started.
+
+% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
+The given process has successfully been started, and has the given PID.
+
+% BIND10_STARTING starting BIND10: %1
+Informational message on startup that shows the full version.
+
+% BIND10_STARTING_PROCESS starting process %1
+The boss module is starting the given process.
+
+% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
+The boss module is starting the given process, which will listen on the
+given port number.
+
+% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
+The boss module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+
+% BIND10_STARTUP_COMPLETE BIND 10 started
+All modules have been successfully started, and BIND 10 is now running.
+
+% BIND10_STARTUP_ERROR error during startup: %1
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+
+% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_STOP_PROCESS asking %1 to shut down
+The boss module is sending a shutdown command to the given module over
+the message channel.
+
+% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
new file mode 100755
index 0000000..28af8cc
--- /dev/null
+++ b/src/bin/bind10/bind10_src.py.in
@@ -0,0 +1,1078 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the Boss of Bind (BoB, or bob) program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the BoB class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
+else:
+ PREFIX = "@prefix@"
+ DATAROOTDIR = "@datarootdir@"
+ SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+from isc.log_messages.bind10_messages import *
+import isc.bind10.sockcreator
+
+isc.log.init("b10-boss")
+logger = isc.log.Logger("boss")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = 10
+DBG_COMMANDS = 30
+
+# Assign this process some longer name
+isc.util.process.rename(sys.argv[0])
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for bind10.boottime of stats module
+_BASETIME = time.gmtime()
+
+class RestartSchedule:
+ """
+Keeps state when restarting something (in this case, a process).
+
+When a process dies unexpectedly, we need to restart it. However, if
+it fails to restart for some reason, then we should not simply keep
+restarting it at high speed.
+
+A more sophisticated algorithm can be developed, but for now we choose
+a simple set of rules:
+
+ * If a process has been running for >=10 seconds, we restart it
+ right away.
+ * If a process was running for <10 seconds, we wait until 10 seconds
+ after it was started.
+
+To avoid programs getting into lockstep, we use a normal distribution
+to avoid being restarted at exactly 10 seconds."""
+
+ def __init__(self, restart_frequency=10.0):
+ self.restart_frequency = restart_frequency
+ self.run_start_time = None
+ self.run_stop_time = None
+ self.restart_time = None
+
+ def set_run_start_time(self, when=None):
+ if when is None:
+ when = time.time()
+ self.run_start_time = when
+ sigma = self.restart_frequency * 0.05
+ self.restart_time = when + random.normalvariate(self.restart_frequency,
+ sigma)
+
+ def set_run_stop_time(self, when=None):
+ """We don't actually do anything with stop time now, but it
+ might be useful for future algorithms."""
+ if when is None:
+ when = time.time()
+ self.run_stop_time = when
+
+ def get_restart_time(self, when=None):
+ if when is None:
+ when = time.time()
+ return max(when, self.restart_time)
+
+class ProcessInfoError(Exception): pass
+
+class ProcessInfo:
+ """Information about a process"""
+
+ dev_null = open(os.devnull, "w")
+
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False, uid=None, username=None):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.restart_schedule = RestartSchedule()
+ self.uid = uid
+ self.username = username
+ self.process = None
+ self.pid = None
+
+ def _preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # First, put us into a separate process group so we don't get
+        # SIGINT signals on Ctrl-C (the boss will shut everything down by
+ # other means).
+ os.setpgrp()
+ # Second, set the user ID if one has been specified
+ if self.uid is not None:
+ try:
+ posix.setuid(self.uid)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # if we failed to change user due to permission report that
+ raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
+ else:
+ # otherwise simply re-raise whatever error we found
+ raise
+
+ def _spawn(self):
+ if self.dev_null_stdout:
+ spawn_stdout = self.dev_null
+ else:
+ spawn_stdout = None
+ if self.dev_null_stderr:
+ spawn_stderr = self.dev_null
+ else:
+ spawn_stderr = None
+ # Environment variables for the child process will be a copy of those
+ # of the boss process with any additional specific variables given
+ # on construction (self.env).
+ spawn_env = os.environ
+ spawn_env.update(self.env)
+ if 'B10_FROM_SOURCE' not in os.environ:
+ spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
+ self.process = subprocess.Popen(self.args,
+ stdin=subprocess.PIPE,
+ stdout=spawn_stdout,
+ stderr=spawn_stderr,
+ close_fds=True,
+ env=spawn_env,
+ preexec_fn=self._preexec_work)
+ self.pid = self.process.pid
+ self.restart_schedule.set_run_start_time()
+
+ # spawn() and respawn() are the same for now, but in the future they
+ # may have different functionality
+ def spawn(self):
+ self._spawn()
+
+ def respawn(self):
+ self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class BoB:
+ """Boss of BIND class."""
+
+ def __init__(self, msgq_socket_file=None, data_path=None,
+ config_filename=None, nocache=False, verbose=False, setuid=None,
+ username=None, cmdctl_port=None, brittle=False):
+ """
+ Initialize the Boss of BIND. This is a singleton (only one can run).
+
+ The msgq_socket_file specifies the UNIX domain socket file that the
+ msgq process listens on. If verbose is True, then the boss reports
+ what it is doing.
+
+        Data path and config filename are passed through to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specify on which port it
+ should listen.
+ """
+ self.cc_session = None
+ self.ccs = None
+ self.cfg_start_auth = True
+ self.cfg_start_resolver = False
+ self.cfg_start_dhcp6 = False
+ self.cfg_start_dhcp4 = False
+ self.started_auth_family = False
+ self.started_resolver_family = False
+ self.curproc = None
+ self.dead_processes = {}
+ self.msgq_socket_file = msgq_socket_file
+ self.nocache = nocache
+ self.processes = {}
+ self.expected_shutdowns = {}
+ self.runnable = False
+ self.uid = setuid
+ self.username = username
+ self.verbose = verbose
+ self.data_path = data_path
+ self.config_filename = config_filename
+ self.cmdctl_port = cmdctl_port
+ self.brittle = brittle
+ self.sockcreator = None
+
+ def config_handler(self, new_config):
+ # If this is initial update, don't do anything now, leave it to startup
+ if not self.runnable:
+ return
+ # Now we declare few functions used only internally here. Besides the
+ # benefit of not polluting the name space, they are closures, so we
+ # don't need to pass some variables
+ def start_stop(name, started, start, stop):
+ if not'start_' + name in new_config:
+ return
+ if new_config['start_' + name]:
+ if not started:
+ if self.uid is not None:
+ logger.info(BIND10_START_AS_NON_ROOT, name)
+ start()
+ else:
+ stop()
+ # These four functions are passed to start_stop (smells like functional
+ # programming little bit)
+ def resolver_on():
+ self.start_resolver(self.c_channel_env)
+ self.started_resolver_family = True
+ def resolver_off():
+ self.stop_resolver()
+ self.started_resolver_family = False
+ def auth_on():
+ self.start_auth(self.c_channel_env)
+ self.start_xfrout(self.c_channel_env)
+ self.start_xfrin(self.c_channel_env)
+ self.start_zonemgr(self.c_channel_env)
+ self.started_auth_family = True
+ def auth_off():
+ self.stop_zonemgr()
+ self.stop_xfrin()
+ self.stop_xfrout()
+ self.stop_auth()
+ self.started_auth_family = False
+
+ # The real code of the config handler function follows here
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+ new_config)
+ start_stop('resolver', self.started_resolver_family, resolver_on,
+ resolver_off)
+ start_stop('auth', self.started_auth_family, auth_on, auth_off)
+
+ answer = isc.config.ccsession.create_answer(0)
+ return answer
+
+ def get_processes(self):
+ pids = list(self.processes.keys())
+ pids.sort()
+ process_list = [ ]
+ for pid in pids:
+ process_list.append([pid, self.processes[pid].name])
+ return process_list
+
+ def _get_stats_data(self):
+ return { "stats_data": {
+ 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }}
+
+ def command_handler(self, command, args):
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+ answer = isc.config.ccsession.create_answer(1, "command not implemented")
+ if type(command) != str:
+ answer = isc.config.ccsession.create_answer(1, "bad command")
+ else:
+ if command == "shutdown":
+ self.runnable = False
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "getstats":
+ answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
+ elif command == "sendstats":
+ # send statistics data to the stats daemon immediately
+ cmd = isc.config.ccsession.create_command(
+ 'set', self._get_stats_data())
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ # Consume the answer, in case it becomes a orphan message.
+ try:
+ self.cc_session.group_recvmsg(False, seq)
+ except isc.cc.session.SessionTimeout:
+ pass
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "ping":
+ answer = isc.config.ccsession.create_answer(0, "pong")
+ elif command == "show_processes":
+ answer = isc.config.ccsession. \
+ create_answer(0, self.get_processes())
+ else:
+ answer = isc.config.ccsession.create_answer(1,
+ "Unknown command")
+ return answer
+
+ def start_creator(self):
+ self.curproc = 'b10-sockcreator'
+ self.sockcreator = isc.bind10.sockcreator.Creator("@@LIBEXECDIR@@:" +
+ os.environ['PATH'])
+
+ def stop_creator(self, kill=False):
+ if self.sockcreator is None:
+ return
+ if kill:
+ self.sockcreator.kill()
+ else:
+ self.sockcreator.terminate()
+ self.sockcreator = None
+
+ def kill_started_processes(self):
+ """
+ Called as part of the exception handling when a process fails to
+ start, this runs through the list of started processes, killing
+ each one. It then clears that list.
+ """
+ logger.info(BIND10_KILLING_ALL_PROCESSES)
+
+ self.stop_creator(True)
+
+ for pid in self.processes:
+ logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
+ self.processes[pid].process.kill()
+ self.processes = {}
+
+ def read_bind10_config(self):
+ """
+ Reads the parameters associated with the BoB module itself.
+
+ At present these are the components to start although arguably this
+ information should be in the configuration for the appropriate
+ module itself. (However, this would cause difficulty in the case of
+ xfrin/xfrout and zone manager as we don't need to start those if we
+ are not running the authoritative server.)
+ """
+ logger.info(BIND10_READING_BOSS_CONFIGURATION)
+
+ config_data = self.ccs.get_full_config()
+ self.cfg_start_auth = config_data.get("start_auth")
+ self.cfg_start_resolver = config_data.get("start_resolver")
+
+ logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
+ logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+
+ def log_starting(self, process, port = None, address = None):
+ """
+ A convenience function to output a "Starting xxx" message if the
+ logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
+ Putting this into a separate method ensures
+ that the output form is consistent across all processes.
+
+ The process name (passed as the first argument) is put into
+ self.curproc, and is used to indicate which process failed to
+ start if there is an error (and is used in the "Started" message
+ on success). The optional port and address information are
+ appended to the message (if present).
+ """
+ self.curproc = process
+ if port is None and address is None:
+ logger.info(BIND10_STARTING_PROCESS, self.curproc)
+ elif address is None:
+ logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
+ port)
+ else:
+ logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
+ self.curproc, address, port)
+
+ def log_started(self, pid = None):
+ """
+ A convenience function to output a 'Started xxxx (PID yyyy)'
+ message. As with starting_message(), this ensures a consistent
+ format.
+ """
+ if pid is None:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
+ else:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+ # The next few methods start the individual processes of BIND-10. They
+ # are called via start_all_processes(). If any fail, an exception is
+ # raised which is caught by the caller of start_all_processes(); this kills
+ # processes started up to that point before terminating the program.
+
    def start_msgq(self, c_channel_env):
        """
        Start the message queue (b10-msgq) and connect to the command
        channel, retrying for up to 5 seconds while the daemon comes up.

        Raises CChannelConnectError if no connection could be made.
        """
        self.log_starting("b10-msgq")
        c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
                                True, not self.verbose, uid=self.uid,
                                username=self.username)
        c_channel.spawn()
        self.processes[c_channel.pid] = c_channel
        self.log_started(c_channel.pid)

        # Now connect to the c-channel
        cc_connect_start = time.time()
        while self.cc_session is None:
            # if we have been trying for "a while" give up
            if (time.time() - cc_connect_start) > 5:
                raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")

            # try to connect, and if we can't wait a short while
            try:
                self.cc_session = isc.cc.Session(self.msgq_socket_file)
            except isc.cc.session.SessionError:
                time.sleep(0.1)
+
    def start_cfgmgr(self, c_channel_env):
        """
        Start the configuration manager process (b10-cfgmgr), forwarding
        the data path and config filename when they were given on the
        command line, then wait briefly for it to come up.
        """
        self.log_starting("b10-cfgmgr")
        args = ["b10-cfgmgr"]
        if self.data_path is not None:
            args.append("--data-path=" + self.data_path)
        if self.config_filename is not None:
            args.append("--config-filename=" + self.config_filename)
        bind_cfgd = ProcessInfo("b10-cfgmgr", args,
                                c_channel_env, uid=self.uid,
                                username=self.username)
        bind_cfgd.spawn()
        self.processes[bind_cfgd.pid] = bind_cfgd
        self.log_started(bind_cfgd.pid)

        # sleep until b10-cfgmgr is fully up and running, this is a good place
        # to have a (short) timeout on synchronized groupsend/receive
        # TODO: replace the sleep by a listen for ConfigManager started
        # message
        time.sleep(1)
+
    def start_ccsession(self, c_channel_env):
        """
        Start the CC Session.

        The argument c_channel_env is unused but is supplied to keep the
        argument list the same for all start_xxx methods.
        """
        self.log_starting("ccsession")
        self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
                                              self.config_handler,
                                              self.command_handler)
        self.ccs.start()
        # No PID is logged: the session runs in-process, not as a child.
        self.log_started()
+
+ # A couple of utility methods for starting processes...
+
+ def start_process(self, name, args, c_channel_env, port=None, address=None):
+ """
+ Given a set of command arguments, start the process and output
+ appropriate log messages. If the start is successful, the process
+ is added to the list of started processes.
+
+ The port and address arguments are for log messages only.
+ """
+ self.log_starting(name, port, address)
+ newproc = ProcessInfo(name, args, c_channel_env)
+ newproc.spawn()
+ self.processes[newproc.pid] = newproc
+ self.log_started(newproc.pid)
+
+ def start_simple(self, name, c_channel_env, port=None, address=None):
+ """
+ Most of the BIND-10 processes are started with the command:
+
+ <process-name> [-v]
+
+ ... where -v is appended if verbose is enabled. This method
+ generates the arguments from the name and starts the process.
+
+ The port and address arguments are for log messages only.
+ """
+ # Set up the command arguments.
+ args = [name]
+ if self.verbose:
+ args += ['-v']
+
+ # ... and start the process
+ self.start_process(name, args, c_channel_env, port, address)
+
+ # The next few methods start up the rest of the BIND-10 processes.
+ # Although many of these methods are little more than a call to
+ # start_simple, they are retained (a) for testing reasons and (b) as a place
+ # where modifications can be made if the process start-up sequence changes
+ # for a given process.
+
+ def start_auth(self, c_channel_env):
+ """
+ Start the Authoritative server
+ """
+ authargs = ['b10-auth']
+ if self.nocache:
+ authargs += ['-n']
+ if self.uid:
+ authargs += ['-u', str(self.uid)]
+ if self.verbose:
+ authargs += ['-v']
+
+ # ... and start
+ self.start_process("b10-auth", authargs, c_channel_env)
+
+ def start_resolver(self, c_channel_env):
+ """
+ Start the Resolver. At present, all these arguments and switches
+ are pure speculation. As with the auth daemon, they should be
+ read from the configuration database.
+ """
+ self.curproc = "b10-resolver"
+ # XXX: this must be read from the configuration manager in the future
+ resargs = ['b10-resolver']
+ if self.uid:
+ resargs += ['-u', str(self.uid)]
+ if self.verbose:
+ resargs += ['-v']
+
+ # ... and start
+ self.start_process("b10-resolver", resargs, c_channel_env)
+
    def start_xfrout(self, c_channel_env):
        # Outbound zone transfers; started only when the auth server runs.
        self.start_simple("b10-xfrout", c_channel_env)

    def start_xfrin(self, c_channel_env):
        # Inbound zone transfers; started only when the auth server runs.
        self.start_simple("b10-xfrin", c_channel_env)

    def start_zonemgr(self, c_channel_env):
        # Zone manager; started only when the auth server runs.
        self.start_simple("b10-zonemgr", c_channel_env)

    def start_stats(self, c_channel_env):
        # Statistics daemon.
        self.start_simple("b10-stats", c_channel_env)

    def start_stats_httpd(self, c_channel_env):
        # HTTP front-end for the statistics daemon.
        self.start_simple("b10-stats-httpd", c_channel_env)

    def start_dhcp6(self, c_channel_env):
        # DHCPv6 server; controlled by the cfg_start_dhcp6 flag.
        self.start_simple("b10-dhcp6", c_channel_env)
+
+ def start_cmdctl(self, c_channel_env):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
+
    def start_all_processes(self):
        """
        Start all processes in dependency order: socket creator, msgq,
        config manager, CC session, then the optional services according
        to the configuration.  Any exception raised while starting a
        process is handled by the caller (startup()), which kills every
        process started so far.
        """
        # The socket creator first, as it is the only thing that needs root
        self.start_creator()
        # TODO: Once everything uses the socket creator, we can drop root
        # privileges right now

        c_channel_env = self.c_channel_env
        self.start_msgq(c_channel_env)
        self.start_cfgmgr(c_channel_env)
        self.start_ccsession(c_channel_env)

        # Extract the parameters associated with Bob. This can only be
        # done after the CC Session is started.
        self.read_bind10_config()

        # Continue starting the processes. The authoritative server (if
        # selected):
        if self.cfg_start_auth:
            self.start_auth(c_channel_env)

        # ... and resolver (if selected):
        if self.cfg_start_resolver:
            self.start_resolver(c_channel_env)
            self.started_resolver_family = True

        # Everything after the main components can run as non-root.
        # TODO: this is only temporary - once the privileged socket creator is
        # fully working, nothing else will run as root.
        if self.uid is not None:
            posix.setuid(self.uid)

        # xfrin/xfrout and the zone manager are only meaningful if the
        # authoritative server has been started.
        if self.cfg_start_auth:
            self.start_xfrout(c_channel_env)
            self.start_xfrin(c_channel_env)
            self.start_zonemgr(c_channel_env)
            self.started_auth_family = True

        # ... and finally start the remaining processes
        self.start_stats(c_channel_env)
        self.start_stats_httpd(c_channel_env)
        self.start_cmdctl(c_channel_env)

        if self.cfg_start_dhcp6:
            self.start_dhcp6(c_channel_env)
+
    def startup(self):
        """
        Start the BoB instance.

        Returns None if successful, otherwise a string describing the
        problem.
        """
        # Try to connect to the c-channel daemon, to see if it is already
        # running
        c_channel_env = {}
        if self.msgq_socket_file is not None:
            c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
        logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
        # A successful connect here means a msgq is already running, so
        # refuse to start a second instance.
        try:
            self.cc_session = isc.cc.Session(self.msgq_socket_file)
            logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
            return "b10-msgq already running, or socket file not cleaned , cannot start"
        except isc.cc.session.SessionError:
            # this is the case we want, where the msgq is not running
            pass

        # Start all processes. If any one fails to start, kill all started
        # processes and exit with an error indication.
        try:
            self.c_channel_env = c_channel_env
            self.start_all_processes()
        except Exception as e:
            self.kill_started_processes()
            # self.curproc names the process that was being started.
            return "Unable to start " + self.curproc + ": " + str(e)

        # Started successfully
        self.runnable = True
        return None
+
+ def stop_all_processes(self):
+ """Stop all processes."""
+ cmd = { "command": ['shutdown']}
+
+ self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
+ self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
+ self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
+ self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
+ self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
+ self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
+ self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
+ self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
+ self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
+ # Terminate the creator last
+ self.stop_creator()
+
    def stop_process(self, process, recipient):
        """
        Stop the given process, friendly-like. The process is the name it has
        (in logs, etc), the recipient is the address on msgq.

        The process is recorded in expected_shutdowns first, so that
        restart_processes() will not resurrect it when it dies.
        """
        logger.info(BIND10_STOP_PROCESS, process)
        # TODO: Some timeout to solve processes that don't want to die would
        # help. We can even store it in the dict, it is used only as a set
        self.expected_shutdowns[process] = 1
        # Ask the process to die willingly
        self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
                                      recipient)
+
    # Series of stop_process wrappers; each asks one component to shut
    # down over the command channel (see stop_process()).
    def stop_resolver(self):
        self.stop_process('b10-resolver', 'Resolver')

    def stop_auth(self):
        self.stop_process('b10-auth', 'Auth')

    def stop_xfrout(self):
        self.stop_process('b10-xfrout', 'Xfrout')

    def stop_xfrin(self):
        self.stop_process('b10-xfrin', 'Xfrin')

    def stop_zonemgr(self):
        self.stop_process('b10-zonemgr', 'Zonemgr')
+
    def shutdown(self):
        """Stop the BoB instance and all processes it manages.

        Escalates in three stages: a polite 'shutdown' request over the
        command channel, then SIGTERM to every remaining child, then
        repeated SIGKILL until all children have been reaped.
        """
        logger.info(BIND10_SHUTDOWN)
        # first try using the BIND 10 request to stop
        try:
            self.stop_all_processes()
        except:
            pass
        # XXX: some delay probably useful... how much is uncertain
        # I have changed the delay from 0.5 to 1, but sometimes it's
        # still not enough.
        time.sleep(1)
        self.reap_children()
        # next try sending a SIGTERM
        processes_to_stop = list(self.processes.values())
        for proc_info in processes_to_stop:
            logger.info(BIND10_SEND_SIGTERM, proc_info.name,
                        proc_info.pid)
            try:
                proc_info.process.terminate()
            except OSError:
                # ignore these (usually ESRCH because the child
                # finally exited)
                pass
        # finally, send SIGKILL (unmaskable termination) until everybody dies
        while self.processes:
            # XXX: some delay probably useful... how much is uncertain
            time.sleep(0.1)
            self.reap_children()
            processes_to_stop = list(self.processes.values())
            for proc_info in processes_to_stop:
                logger.info(BIND10_SEND_SIGKILL, proc_info.name,
                            proc_info.pid)
                try:
                    proc_info.process.kill()
                except OSError:
                    # ignore these (usually ESRCH because the child
                    # finally exited)
                    pass
        logger.info(BIND10_SHUTDOWN_COMPLETE)
+
    def _get_process_exit_status(self):
        # Non-blocking wait for any child: returns (pid, status), where
        # pid == 0 means no child has exited yet (os.WNOHANG).
        return os.waitpid(-1, os.WNOHANG)
+
    def reap_children(self):
        """Check to see if any of our child processes have exited,
        and note this for later handling.

        Known dead processes move from self.processes to
        self.dead_processes for restart_processes(); the socket creator
        and b10-msgq are special — the system cannot continue without
        them, so their death clears self.runnable.
        """
        while True:
            try:
                (pid, exit_status) = self._get_process_exit_status()
            except OSError as o:
                if o.errno == errno.ECHILD: break
                # XXX: should be impossible to get any other error here
                raise
            # pid 0: children exist but none has exited (WNOHANG).
            if pid == 0: break
            if self.sockcreator is not None and self.sockcreator.pid() == pid:
                # This is the socket creator, started and terminated
                # differently. This can't be restarted.
                if self.runnable:
                    logger.fatal(BIND10_SOCKCREATOR_CRASHED)
                self.sockcreator = None
                self.runnable = False
            elif pid in self.processes:
                # One of the processes we know about. Get information on it.
                proc_info = self.processes.pop(pid)
                proc_info.restart_schedule.set_run_stop_time()
                self.dead_processes[proc_info.pid] = proc_info

                # Write out message, but only if in the running state:
                # During startup and shutdown, these messages are handled
                # elsewhere.
                if self.runnable:
                    # NOTE(review): os.waitpid() does not return None for
                    # the status, so this branch looks purely defensive.
                    if exit_status is None:
                        logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
                                    proc_info.name, proc_info.pid)
                    else:
                        logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
                                    proc_info.name, proc_info.pid,
                                    exit_status)

                    # Was it a special process?
                    if proc_info.name == "b10-msgq":
                        logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
                        self.runnable = False

                # If we're in 'brittle' mode, we want to shutdown after
                # any process dies.
                if self.brittle:
                    self.runnable = False
            else:
                logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
+ def restart_processes(self):
+ """
+ Restart any dead processes:
+
+ * Returns the time when the next process is ready to be restarted.
+ * If the server is shutting down, returns 0.
+ * If there are no processes, returns None.
+
+ The values returned can be safely passed into select() as the
+ timeout value.
+ """
+ next_restart = None
+ # if we're shutting down, then don't restart
+ if not self.runnable:
+ return 0
+ # otherwise look through each dead process and try to restart
+ still_dead = {}
+ now = time.time()
+ for proc_info in self.dead_processes.values():
+ if proc_info.name in self.expected_shutdowns:
+ # We don't restart, we wanted it to die
+ del self.expected_shutdowns[proc_info.name]
+ continue
+ restart_time = proc_info.restart_schedule.get_restart_time(now)
+ if restart_time > now:
+ if (next_restart is None) or (next_restart > restart_time):
+ next_restart = restart_time
+ still_dead[proc_info.pid] = proc_info
+ else:
+ logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
+ try:
+ proc_info.respawn()
+ self.processes[proc_info.pid] = proc_info
+ logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
+ except:
+ still_dead[proc_info.pid] = proc_info
+ # remember any processes that refuse to be resurrected
+ self.dead_processes = still_dead
+ # return the time when the next process is ready to be restarted
+ return next_restart
+
# Global variables shared with the signal handlers below.
options = None        # parsed command-line options; set in main()
boss_of_bind = None   # the BoB instance; set in main()
+
def reaper(signal_number, stack_frame):
    """SIGCHLD handler: deliberately does nothing.

    The Python signal machinery is set up (signal.set_wakeup_fd in
    main()) to write down a pipe, which wakes up the select() loop;
    the actual reaping happens there via reap_children().
    """
    pass
+
def get_signame(signal_number):
    """Return the symbolic name (e.g. 'SIGTERM') for a signal number.

    Scans the signal module for matching SIG* attributes and falls back
    to 'Unknown signal N' when nothing matches.
    """
    for name in dir(signal):
        # Only real SIGxxx names qualify: the 4th character must be
        # alphanumeric, which skips entries like SIG_DFL / SIG_IGN.
        if not name.startswith("SIG") or not name[3].isalnum():
            continue
        if getattr(signal, name) == signal_number:
            return name
    return "Unknown signal %d" % signal_number
+
+# XXX: perhaps register atexit() function and invoke that instead
+def fatal_signal(signal_number, stack_frame):
+ """We need to exit (SIGINT or SIGTERM received)."""
+ global options
+ global boss_of_bind
+ logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.runnable = False
+
def process_rename(option, opt_str, value, parser):
    """optparse callback for --pretty-name: rename this process to *value*.

    The option/opt_str/parser arguments are part of the optparse
    callback signature and are unused here.
    """
    isc.util.process.rename(value)
+
def parse_args(args=sys.argv[1:], Parser=OptionParser):
    """
    Function for parsing command line arguments. Returns the
    options object from OptionParser.

    The Parser argument lets callers inject a different parser class.
    NOTE(review): the default for *args* is captured from sys.argv once,
    at import time.  Exits (via parser.error/sys.exit) on an invalid
    --cmdctl-port or leftover positional arguments.
    """
    parser = Parser(version=VERSION)
    parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
                      type="string", default=None,
                      help="UNIX domain socket file the b10-msgq daemon will use")
    parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
                      default=False, help="disable hot-spot cache in authoritative DNS server")
    parser.add_option("-u", "--user", dest="user", type="string", default=None,
                      help="Change user after startup (must run as root)")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="display more about what is going on")
    parser.add_option("--pretty-name", type="string", action="callback",
                      callback=process_rename,
                      help="Set the process name (displayed in ps, top, ...)")
    parser.add_option("-c", "--config-file", action="store",
                      dest="config_file", default=None,
                      help="Configuration database filename")
    parser.add_option("-p", "--data-path", dest="data_path",
                      help="Directory to search for configuration files",
                      default=None)
    parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
                      default=None, help="Port of command control")
    parser.add_option("--pid-file", dest="pid_file", type="string",
                      default=None,
                      help="file to dump the PID of the BIND 10 process")
    parser.add_option("--brittle", dest="brittle", action="store_true",
                      help="debugging flag: exit if any component dies")

    (options, args) = parser.parse_args(args)

    # Validate the port number early; parser.error() exits the process.
    if options.cmdctl_port is not None:
        try:
            isc.net.parse.port_parse(options.cmdctl_port)
        except ValueError as e:
            parser.error(e)

    # Any leftover positional arguments are an error.
    if args:
        parser.print_help()
        sys.exit(1)

    return options
+
def dump_pid(pid_file):
    """
    Dump the PID of the current process to the specified file. If the given
    file is None this function does nothing. If the file already exists,
    the existing content will be removed. If a system error happens in
    creating or writing to the file, the corresponding exception will be
    propagated to the caller.
    """
    if pid_file is None:
        return
    # Use a context manager so the descriptor is closed even when the
    # write raises (the original leaked the file object in that case).
    with open(pid_file, "w") as f:
        f.write('%d\n' % os.getpid())
+
def unlink_pid_file(pid_file):
    """
    Remove the given file, which is basically expected to be the PID file
    created by dump_pid(). The specified file may or may not exist; if it
    doesn't this function does nothing. Other system level errors in removing
    the file will be propagated as the corresponding exception.
    """
    if pid_file is None:
        return
    try:
        os.unlink(pid_file)
    except OSError as error:
        # Compare errno values with !=, not "is not": identity comparison
        # of ints only works by accident for CPython's small-int cache.
        if error.errno != errno.ENOENT:
            raise
+
+
def main():
    """Entry point: parse options, set up signal handling, start the BoB
    instance and run the select() loop until shutdown is requested."""
    global options
    global boss_of_bind
    # Enforce line buffering on stdout, even when not a TTY
    sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)

    options = parse_args()

    # Check user ID.
    setuid = None
    username = None
    if options.user:
        # Try getting information about the user, assuming UID passed.
        try:
            pw_ent = pwd.getpwuid(int(options.user))
            setuid = pw_ent.pw_uid
            username = pw_ent.pw_name
        except ValueError:
            pass
        except KeyError:
            pass

        # Next try getting information about the user, assuming user name
        # passed.
        # If the information is both a valid user name and user number, we
        # prefer the name because we try it second. A minor point, hopefully.
        try:
            pw_ent = pwd.getpwnam(options.user)
            setuid = pw_ent.pw_uid
            username = pw_ent.pw_name
        except KeyError:
            pass

        if setuid is None:
            logger.fatal(BIND10_INVALID_USER, options.user)
            sys.exit(1)

    # Announce startup.
    logger.info(BIND10_STARTING, VERSION)

    # Create wakeup pipe for signal handlers
    wakeup_pipe = os.pipe()
    signal.set_wakeup_fd(wakeup_pipe[1])

    # Set signal handlers for catching child termination, as well
    # as our own demise.
    signal.signal(signal.SIGCHLD, reaper)
    signal.siginterrupt(signal.SIGCHLD, False)
    signal.signal(signal.SIGINT, fatal_signal)
    signal.signal(signal.SIGTERM, fatal_signal)

    # Block SIGPIPE, as we don't want it to end this process
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)

    # Go bob!
    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
                       options.config_file, options.nocache, options.verbose,
                       setuid, username, options.cmdctl_port, options.brittle)
    startup_result = boss_of_bind.startup()
    if startup_result:
        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
        sys.exit(1)
    logger.info(BIND10_STARTUP_COMPLETE)
    dump_pid(options.pid_file)

    # In our main loop, we check for dead processes or messages
    # on the c-channel.
    wakeup_fd = wakeup_pipe[0]
    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
    while boss_of_bind.runnable:
        # clean up any processes that exited
        boss_of_bind.reap_children()
        next_restart = boss_of_bind.restart_processes()
        if next_restart is None:
            wait_time = None
        else:
            wait_time = max(next_restart - time.time(), 0)

        # select() can raise EINTR when a signal arrives,
        # even if they are resumable, so we have to catch
        # the exception
        try:
            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
                                                  wait_time)
        except select.error as err:
            if err.args[0] == errno.EINTR:
                (rlist, wlist, xlist) = ([], [], [])
            else:
                logger.fatal(BIND10_SELECT_ERROR, err)
                break

        for fd in rlist + xlist:
            if fd == ccs_fd:
                try:
                    boss_of_bind.ccs.check_command()
                except isc.cc.session.ProtocolError:
                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
                    # BUG FIX: this read "self.runnable = False", but there
                    # is no "self" at module level (NameError at runtime);
                    # clear the flag on the global BoB instance instead.
                    boss_of_bind.runnable = False
                    break
            elif fd == wakeup_fd:
                # Drain the signal-handler wakeup pipe.
                os.read(wakeup_fd, 32)

    # shutdown
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    boss_of_bind.shutdown()
    unlink_pid_file(options.pid_file)
    sys.exit(0)
+
+if __name__ == "__main__":
+ main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 1184fd1..b4cfac6 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -37,6 +37,17 @@
"command_description": "List the running BIND 10 processes",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
new file mode 100644
index 0000000..c23d907
--- /dev/null
+++ b/src/bin/bind10/creatorapi.txt
@@ -0,0 +1,123 @@
+Socket creator API
+==================
+
+This API is between Boss and other modules to allow them requesting of sockets.
+For simplicity, we will use the socket creator for all (even non-privileged)
+ports for now, but we should have some function where we can abstract it later.
+
+Goals
+-----
+* Be able to request a socket of any combination of IPv4/IPv6 and UDP/TCP bound to a given
+ port and address (sockets that are not bound to anything can be created
+ without privileges, therefore are not requested from the socket creator).
+* Allow to provide the same socket to multiple modules (eg. multiple running
+ auth servers).
+* Allow releasing the sockets (in case all modules using it give it up,
+ terminate or crash).
+* Allow restricting of the sharing (don't allow shared socket between auth
+ and recursive, as the packets would often get to the wrong application,
+ show error instead).
+* Get the socket to the application.
+
+Transport of sockets
+--------------------
+It seems we are stuck with current msgq for a while and there's a chance the
+new replacement will not be able to send sockets inbound. So, we need another
+channel.
+
+The boss will create a unix-domain socket and listen on it. When something
+requests a socket over the command channel and the socket is created, some kind
+of token is returned to the application (which will represent the future
+socket). The application then connects to the unix-domain socket, sends the
+token over the connection (so Boss will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Boss sends the socket
+in return.
+
+In theory, we could send the requests directly over the unix-domain
+socket, but it has two disadvantages:
+* The msgq handles serializing/deserializing of structured
+ information (like the parameters to be used), we would have to do it
+ manually on the socket.
+* We could place some kind of security in front of msgq (in case file
+ permissions are not enough, for example if they are not honored on
+ socket files, as indicated in the first paragraph of:
+ http://lkml.indiana.edu/hypermail/linux/kernel/0505.2/0008.html).
+ The socket would have to be secured separately. With the tokens,
+ there's some level of security already - someone not having the
+ token can't request a privileged socket.
+
+Caching of sockets
+------------------
+To allow sending the same socket to multiple applications, the Boss process will
+hold a cache. Each socket that is created and sent is kept open in Boss and
+preserved there as well. A reference count is kept with each of them.
+
+When another application asks for the same socket, it is simply sent from the
+cache instead of creating it again by the creator.
+
+When an application gives up the socket willingly (by sending a message over the
+command channel), the reference count can be decreased without problems. But
+when the application terminates or crashes, we need to decrease it as well.
+There's a problem, since we don't know which command channel connection (eg.
+lname) belongs to which PID. Furthermore, the applications don't need to be
+started by boss.
+
+There are two possibilities:
+* Let the msgq send messages about disconnected clients (eg. group message to
+ some name). This one is better if we want to migrate to dbus, since dbus
+ already has this capability as well as sending the sockets inbound (at least it
+ seems so on unix) and we could get rid of the unix-domain socket completely.
+* Keep the unix-domain connections open forever. Boss can remember which socket
+ was sent to which connection and when the connection closes (because the
+ application crashed), it can drop all the references on the sockets. This
+ seems easier to implement.
+
+The commands
+------------
+* Command to release a socket. This one would have single parameter, the token
+ used to get the socket. After this, boss would decrease its reference count
+ and if it drops to zero, close its own copy of the socket. This should be used
+ when the module stops using the socket (and after closes it). The
+ library could remember the file-descriptor to token mapping (for
+ common applications that don't request the same socket multiple
+ times in parallel).
+* Command to request a socket. It would have parameters to specify which socket
+ (IP address, address family, port) and how to allow sharing. Sharing would be
+ one of:
+ - None
+ - Same kind of application (however, it is not entirely clear what
+ this means, in case it won't work out intuitively, we'll need to
+ define it somehow)
+ - Any kind of application
+ And a kind of application would be provided, to decide if the sharing is
+ possible (eg. if auth allows sharing with the same kind and something else
+ allows sharing with anything, the sharing is not possible, two auths can).
+
+ It would return either error (the socket can't be created or sharing is not
+ possible) or the token. Then there would be some time for the application to
+ pick up the requested socket.
+
+Examples
+--------
+We probably would have a library with blocking calls to request the
+sockets, so a code could look like:
+
+(socket_fd, token) = request_socket(address, port, 'UDP', SHARE_SAMENAME, 'test-application')
+sock = socket.fromfd(socket_fd)
+
+# Some sock.send and sock.recv stuff here
+
+sock.close()
+release_socket(socket_fd) # or release_socket(token)
+
+Known limitations
+-----------------
+Currently the socket creator doesn't support specifying any socket
+options. If it turns out there are any options that need to be set
+before bind(), we'll need to extend it (and extend the protocol as
+well). If we want to support them, we'll have to solve a possible
+conflict (what to do when two applications request the same socket and
+want to share it, but want different options).
+
+The current socket creator doesn't know raw sockets, but if they are
+needed, it should be easy to add.
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
old mode 100644
new mode 100755
index ba267b4..50e6e29
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -20,17 +20,17 @@ export PYTHON_EXEC
BIND10_PATH=@abs_top_builddir@/src/bin/bind10
-PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:$PATH
+PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 34d809a..d54ee56 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -2,7 +2,14 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
PYTESTS = bind10_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
@@ -13,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 2ffe2b4..424a610 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
# XXX: environment tests are currently disabled, due to the preprocessor
# setup that we have now complicating the environment
@@ -26,6 +26,7 @@ import socket
from isc.net.addr import IPAddr
import time
import isc
+import isc.log
from isc.testutils.parse_args import TestOptParser, OptsError
@@ -111,6 +112,9 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.cfg_start_auth, True)
self.assertEqual(bob.cfg_start_resolver, False)
+ self.assertEqual(bob.cfg_start_dhcp4, False)
+ self.assertEqual(bob.cfg_start_dhcp6, False)
+
def test_init_alternate_socket(self):
bob = BoB("alt_socket_file")
self.assertEqual(bob.verbose, False)
@@ -125,6 +129,8 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.nocache, False)
self.assertEqual(bob.cfg_start_auth, True)
self.assertEqual(bob.cfg_start_resolver, False)
+ self.assertEqual(bob.cfg_start_dhcp4, False)
+ self.assertEqual(bob.cfg_start_dhcp6, False)
def test_command_handler(self):
class DummySession():
@@ -141,6 +147,12 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.command_handler("shutdown", None),
isc.config.ccsession.create_answer(0))
self.assertFalse(bob.runnable)
+ # "getstats" command
+ self.assertEqual(bob.command_handler("getstats", None),
+ isc.config.ccsession.create_answer(0,
+ { "stats_data": {
+ 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }}))
# "sendstats" command
self.assertEqual(bob.command_handler("sendstats", None),
isc.config.ccsession.create_answer(0))
@@ -187,6 +199,13 @@ class MockBob(BoB):
self.cmdctl = False
self.c_channel_env = {}
self.processes = { }
+ self.creator = False
+
+ def start_creator(self):
+ self.creator = True
+
+ def stop_creator(self, kill=False):
+ self.creator = False
def read_bind10_config(self):
# Configuration options are set directly
@@ -247,6 +266,16 @@ class MockBob(BoB):
self.processes[12] = ProcessInfo('b10-cmdctl', ['/bin/false'])
self.processes[12].pid = 12
+ def start_dhcp6(self, c_channel_env):
+ self.dhcp6 = True
+ self.processes[13] = ProcessInfo('b10-dhcp6', ['/bin/false'])
+ self.processes[13]
+
+ def start_dhcp4(self, c_channel_env):
+ self.dhcp4 = True
+ self.processes[14] = ProcessInfo('b10-dhcp4', ['/bin/false'])
+ self.processes[14]
+
# We don't really use all of these stop_ methods. But it might turn out
# someone would add some stop_ method to BoB and we want that one overriden
# in case he forgets to update the tests.
@@ -321,6 +350,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.assertEqual(bob.msgq, core)
self.assertEqual(bob.cfgmgr, core)
self.assertEqual(bob.ccsession, core)
+ self.assertEqual(bob.creator, core)
self.assertEqual(bob.auth, auth)
self.assertEqual(bob.resolver, resolver)
self.assertEqual(bob.xfrout, auth)
@@ -359,6 +389,24 @@ class TestStartStopProcessesBob(unittest.TestCase):
"""
self.check_started(bob, True, False, True)
+ def check_started_dhcp(self, bob, v4, v6):
+ """
+ Check if proper combinations of DHCPv4 and DHCPv6 can be started
+ """
+ v4found = 0
+ v6found = 0
+
+ for pid in bob.processes:
+ if (bob.processes[pid].name == "b10-dhcp4"):
+ v4found += 1
+ if (bob.processes[pid].name == "b10-dhcp6"):
+ v6found += 1
+
+ # there should be exactly one DHCPv4 daemon (if v4==True)
+ # there should be exactly one DHCPv6 daemon (if v6==True)
+ self.assertEqual(v4==True, v4found==1)
+ self.assertEqual(v6==True, v6found==1)
+
# Checks the processes started when starting neither auth nor resolver
# is specified.
def test_start_none(self):
@@ -524,6 +572,40 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.config_handler({'start_auth': True, 'start_resolver': True})
+ # Checks that DHCP (v4 and v6) processes are started when expected
+ def test_start_dhcp(self):
+
+ # Create BoB and ensure correct initialization
+ bob = MockBob()
+ self.check_preconditions(bob)
+
+ # don't care about DNS stuff
+ bob.cfg_start_auth = False
+ bob.cfg_start_resolver = False
+
+ # v4 and v6 disabled
+ bob.cfg_start_dhcp6 = False
+ bob.cfg_start_dhcp4 = False
+ bob.start_all_processes()
+ self.check_started_dhcp(bob, False, False)
+
+ # v6 only enabled
+ bob.cfg_start_dhcp6 = True
+ bob.cfg_start_dhcp4 = False
+ bob.start_all_processes()
+ self.check_started_dhcp(bob, False, True)
+
+ # uncomment when dhcpv4 becomes implemented
+ # v4 only enabled
+ #bob.cfg_start_dhcp6 = False
+ #bob.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(bob, True, False)
+
+ # both v4 and v6 enabled
+ #bob.cfg_start_dhcp6 = True
+ #bob.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(bob, True, True)
+
class TestBossCmd(unittest.TestCase):
def test_ping(self):
"""
@@ -697,4 +779,5 @@ class TestBrittle(unittest.TestCase):
self.assertFalse(bob.runnable)
if __name__ == '__main__':
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index 2f412ec..700f26e 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -5,6 +5,8 @@ man_MANS = bindctl.1
EXTRA_DIST = $(man_MANS) bindctl.xml
+noinst_SCRIPTS = run_bindctl.sh
+
python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
mycollections.py
pythondir = $(pyexecdir)/bindctl
@@ -25,3 +27,8 @@ bindctl: bindctl_main.py
-e "s|@@SYSCONFDIR@@|@sysconfdir@|" \
-e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl_main.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 8973aa5..8c2b674 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -398,6 +398,8 @@ class BindCmdInterpreter(Cmd):
print("Error: " + str(dte))
except isc.cc.data.DataNotFoundError as dnfe:
print("Error: " + str(dnfe))
+ except isc.cc.data.DataAlreadyPresentError as dape:
+ print("Error: " + str(dape))
except KeyError as ke:
print("Error: missing " + str(ke))
else:
@@ -634,7 +636,15 @@ class BindCmdInterpreter(Cmd):
# we have more data to show
line += "/"
else:
- line += "\t" + json.dumps(value_map['value'])
+ # if type is named_set, don't print value if None
+ # (it is either {} meaning empty, or None, meaning
+ # there actually is data, but not to be shown with
+ # the current command)
+ if value_map['type'] == 'named_set' and\
+ value_map['value'] is None:
+ line += "/\t"
+ else:
+ line += "\t" + json.dumps(value_map['value'])
line += "\t" + value_map['type']
line += "\t"
if value_map['default']:
@@ -649,10 +659,9 @@ class BindCmdInterpreter(Cmd):
data, default = self.config_data.get_value(identifier)
print(json.dumps(data))
elif cmd.command == "add":
- if 'value' in cmd.params:
- self.config_data.add_value(identifier, cmd.params['value'])
- else:
- self.config_data.add_value(identifier)
+ self.config_data.add_value(identifier,
+ cmd.params.get('value_or_name'),
+ cmd.params.get('value_for_set'))
elif cmd.command == "remove":
if 'value' in cmd.params:
self.config_data.remove_value(identifier, cmd.params['value'])
@@ -674,9 +683,12 @@ class BindCmdInterpreter(Cmd):
elif cmd.command == "revert":
self.config_data.clear_local_changes()
elif cmd.command == "commit":
- self.config_data.commit()
+ try:
+ self.config_data.commit()
+ except isc.config.ModuleCCSessionError as mcse:
+ print(str(mcse))
elif cmd.command == "diff":
- print(self.config_data.get_local_changes());
+ print(self.config_data.get_local_changes())
elif cmd.command == "go":
self.go(identifier)
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 01307e9..ee4191d 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -50,17 +50,28 @@ def prepare_config_commands(tool):
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
+ cmd = CommandInfo(name = "add", desc =
+ "Add an entry to configuration list or a named set. "
+ "When adding to a list, the command has one optional argument, "
+ "a value to add to the list. The value must be in correct JSON "
+ "and complete. When adding to a named set, it has one "
+ "mandatory parameter (the name to add), and an optional "
+ "parameter value, similar to when adding to a list. "
+ "In either case, when no value is given, an entry will be "
+ "constructed with default values.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value_or_name", type = "string", optional=True, desc = "Specifies a value to add to the list, or the name when adding to a named set. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+ param = ParamInfo(name = "value_for_set", type = "string", optional=True, desc = "Specifies an optional value to add to the named map. It must be in correct JSON format and complete.")
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
+ cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list or named set.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value", type = "string", optional=True, desc = "When identifier is a list, specifies a value to remove from the list. It must be in correct JSON format and complete. When it is a named set, specifies the name to remove.")
cmd.add_param(param)
module.add_command(cmd)
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
old mode 100644
new mode 100755
index 730ce1e..f4cc40c
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -20,9 +20,17 @@ export PYTHON_EXEC
BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
B10_FROM_SOURCE=@abs_top_srcdir@
export B10_FROM_SOURCE
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index d2bb90f..3d08a17 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = bindctl_test.py cmdparse_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/cfgmgr/Makefile.am b/src/bin/cfgmgr/Makefile.am
index fc0ed4a..aee78cf 100644
--- a/src/bin/cfgmgr/Makefile.am
+++ b/src/bin/cfgmgr/Makefile.am
@@ -28,3 +28,8 @@ install-data-local:
$(mkinstalldirs) $(DESTDIR)/@localstatedir@/@PACKAGE@
# TODO: permissions handled later
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 16c8f76..2ccc430 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -17,7 +17,7 @@
import sys; sys.path.append ('@@PYTHONPATH@@')
-from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError
+import bind10_config
from isc.cc import SessionError
import isc.util.process
import signal
@@ -25,27 +25,17 @@ import os
from optparse import OptionParser
import glob
import os.path
+import isc.log
+isc.log.init("b10-cfgmgr")
+from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError, logger
+from isc.log_messages.cfgmgr_messages import *
isc.util.process.rename()
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to the value of that variable, or, if defined,
-# relative to the value of B10_FROM_SOURCE_LOCALSTATEDIR. Otherwise
-# we use the ones installed on the system.
-# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
-# tests where we want to use variuos types of configuration within the test
-# environment. (We may want to make it even more generic so that the path is
-# passed from the boss process)
-if "B10_FROM_SOURCE" in os.environ:
- if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
- DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
- else:
- DATA_PATH = os.environ["B10_FROM_SOURCE"]
- PLUGIN_PATHS = [DATA_PATH + '/src/bin/cfgmgr/plugins']
-else:
- PREFIX = "@prefix@"
- DATA_PATH = "@localstatedir@/@PACKAGE@".replace("${prefix}", PREFIX)
- PLUGIN_PATHS = ["@prefix@/share/@PACKAGE@/config_plugins"]
+# Import some paths from our configuration
+DATA_PATH = bind10_config.DATA_PATH
+PLUGIN_PATHS = bind10_config.PLUGIN_PATHS
+PREFIX = bind10_config.PREFIX
DEFAULT_CONFIG_FILE = "b10-config.db"
cm = None
@@ -104,13 +94,12 @@ def main():
cm.notify_boss()
cm.run()
except SessionError as se:
- print("[b10-cfgmgr] Error creating config manager, "
- "is the command channel daemon running?")
+ logger.fatal(CFGMGR_CC_SESSION_ERROR, se)
return 1
except KeyboardInterrupt as kie:
- print("[b10-cfgmgr] Interrupted, exiting")
+ logger.info(CFGMGR_STOPPED_BY_KEYBOARD)
except ConfigManagerDataReadError as cmdre:
- print("[b10-cfgmgr] " + str(cmdre))
+ logger.fatal(CFGMGR_DATA_READ_ERROR, cmdre)
return 2
return 0
diff --git a/src/bin/cfgmgr/plugins/Makefile.am b/src/bin/cfgmgr/plugins/Makefile.am
index 952fde6..529a4ed 100644
--- a/src/bin/cfgmgr/plugins/Makefile.am
+++ b/src/bin/cfgmgr/plugins/Makefile.am
@@ -1 +1,12 @@
-EXTRA_DIST = README
+SUBDIRS = tests
+EXTRA_DIST = README tsig_keys.py tsig_keys.spec
+EXTRA_DIST += logging.spec b10logging.py
+
+config_plugindir = @prefix@/share/@PACKAGE@/config_plugins
+config_plugin_DATA = tsig_keys.py tsig_keys.spec
+config_plugin_DATA += b10logging.py logging.spec
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/plugins/b10logging.py b/src/bin/cfgmgr/plugins/b10logging.py
new file mode 100644
index 0000000..e288c6d
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/b10logging.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This is the configuration plugin for logging options
+# The name is 'b10logging' because logging.py is an existing module
+#
+# For a technical background, see
+# http://bind10.isc.org/wiki/LoggingCppApiDesign
+#
+
+from isc.config.module_spec import module_spec_from_file
+from isc.util.file import path_search
+from bind10_config import PLUGIN_PATHS
+spec = module_spec_from_file(path_search('logging.spec', PLUGIN_PATHS))
+
+ALLOWED_SEVERITIES = [ 'default',
+ 'debug',
+ 'info',
+ 'warn',
+ 'error',
+ 'fatal',
+ 'none' ]
+ALLOWED_DESTINATIONS = [ 'console',
+ 'file',
+ 'syslog' ]
+ALLOWED_STREAMS = [ 'stdout',
+ 'stderr' ]
+
+def check(config):
+ # Check the data layout first
+ errors=[]
+ if not spec.validate_config(False, config, errors):
+ return ' '.join(errors)
+ # The 'layout' is ok, now check for specific values
+ if 'loggers' in config:
+ for logger in config['loggers']:
+ # name should always be present
+ name = logger['name']
+ # report an error if name starts with * but not *.,
+ # or if * is not the first character.
+ # TODO: we might want to also warn or error if the
+ # logger name is not an existing module, but we can't
+ # really tell that from here at this point
+ star_pos = name.find('*')
+ if star_pos > 0 or\
+ name == '*.' or\
+ (star_pos == 0 and len(name) > 1 and name[1] != '.'):
+ errors.append("Bad logger name: '" + name + "': * can "
+ "only be used instead of the full "
+ "first-level name, e.g. '*' or "
+ "'*.subsystem'")
+
+ if 'severity' in logger and\
+ logger['severity'].lower() not in ALLOWED_SEVERITIES:
+ errors.append("bad severity value for logger " + name +
+ ": " + logger['severity'])
+ if 'output_options' in logger:
+ for output_option in logger['output_options']:
+ if 'destination' in output_option:
+ destination = output_option['destination'].lower()
+ if destination not in ALLOWED_DESTINATIONS:
+ errors.append("bad destination for logger " +
+ name + ": " + output_option['destination'])
+ else:
+ # if left to default, output is stdout, and
+ # it will not show in the updated config,
+ # so 1. we only need to check it if present,
+ # and 2. if destination is changed, so should
+ # output. So first check checks 'in', and the
+ # others 'not in' for 'output'
+ if destination == "console" and\
+ 'output' in output_option and\
+ output_option['output'] not in ALLOWED_STREAMS:
+ errors.append("bad output for logger " + name +
+ ": " + output_option['output'] +
+ ", must be stdout or stderr")
+ elif destination == "file" and\
+ ('output' not in output_option or\
+ output_option['output'] == ""):
+ errors.append("destination set to file but "
+ "output not set to any "
+ "filename for logger "
+ + name)
+ elif destination == "syslog" and\
+ 'output' not in output_option or\
+ output_option['output'] == "":
+ errors.append("destination set to syslog but "
+ "output not set to any facility"
+ " for logger " + name)
+
+ if errors:
+ return ', '.join(errors)
+ return None
+
+def load():
+ return (spec, check)
+
diff --git a/src/bin/cfgmgr/plugins/logging.spec b/src/bin/cfgmgr/plugins/logging.spec
new file mode 100644
index 0000000..e377b0e
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/logging.spec
@@ -0,0 +1,81 @@
+{
+ "module_spec": {
+ "module_name": "Logging",
+ "module_description": "Logging options",
+ "config_data": [
+ {
+ "item_name": "loggers",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "logger",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "name",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ { "item_name": "severity",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "INFO"
+ },
+ { "item_name": "debuglevel",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0
+ },
+ { "item_name": "additive",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
+ },
+ { "item_name": "output_options",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "output_option",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "destination",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "console"
+ },
+ { "item_name": "output",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "stdout"
+ },
+ { "item_name": "flush",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
+ },
+ { "item_name": "maxsize",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0
+ },
+ { "item_name": "maxver",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ ],
+ "commands": []
+ }
+}
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
new file mode 100644
index 0000000..ffea2d7
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -0,0 +1,27 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = tsig_keys_test.py logging_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
diff --git a/src/bin/cfgmgr/plugins/tests/logging_test.py b/src/bin/cfgmgr/plugins/tests/logging_test.py
new file mode 100644
index 0000000..818a596
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/logging_test.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Make sure we can load the module, put it into path
+import sys
+import os
+sys.path.extend(os.environ["B10_TEST_PLUGIN_DIR"].split(':'))
+
+import b10logging
+import unittest
+
+class LoggingConfCheckTest(unittest.TestCase):
+ def test_load(self):
+ """
+ Checks the entry point returns the correct values.
+ """
+ (spec, check) = b10logging.load()
+ # It returns the checking function
+ self.assertEqual(check, b10logging.check)
+ # The plugin stores its spec
+ self.assertEqual(spec, b10logging.spec)
+
+ def test_logger_conf(self):
+ self.assertEqual(None,
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'DEBUG',
+ 'debuglevel': 50,
+ 'output_options':
+ [{'destination': 'file',
+ 'output': '/some/file'
+ }]
+ },
+ {'name': 'b10-resolver',
+ 'severity': 'WARN',
+ 'additive': True,
+ 'output_options':
+ [{'destination': 'console',
+ 'output': 'stderr',
+ 'flush': True
+ }]
+ },
+ {'name': 'b10-resolver.resolver',
+ 'severity': 'ERROR',
+ 'output_options': []
+ },
+ {'name': '*.cache',
+ 'severity': 'INFO'
+ }
+ ]}))
+ def do_bad_name_test(self, name):
+ err_str = "Bad logger name: '" + name + "': * can only be "\
+ "used instead of the full first-level name, e.g. "\
+ "'*' or '*.subsystem'"
+ self.assertEqual(err_str,
+ b10logging.check({'loggers':
+ [{'name': name,
+ 'severity': 'DEBUG'},
+ ]}))
+
+ def test_logger_bad_name(self):
+ self.do_bad_name_test("*.")
+ self.do_bad_name_test("*foo")
+ self.do_bad_name_test("*foo.lib")
+ self.do_bad_name_test("foo*")
+ self.do_bad_name_test("foo*.lib")
+
+ def test_logger_bad_severity(self):
+ self.assertEqual('bad severity value for logger *: BADVAL',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'BADVAL'}]}))
+
+ def test_logger_bad_destination(self):
+ self.assertEqual('bad destination for logger *: baddest',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'baddest' }
+ ]}]}))
+
+ def test_logger_bad_console_output(self):
+ self.assertEqual('bad output for logger *: bad_output, must be stdout or stderr',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'console',
+ 'output': 'bad_output'
+ }
+ ]}]}))
+
+ def test_logger_bad_file_output(self):
+ self.assertEqual('destination set to file but output not set to any filename for logger *',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'file' }
+ ]}]}))
+
+ def test_logger_bad_syslog_output(self):
+ self.assertEqual('destination set to syslog but output not set to any facility for logger *',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'syslog' }
+ ]}]}))
+
+ def test_logger_bad_type(self):
+ self.assertEqual('123 should be a string',
+ b10logging.check({'loggers':
+ [{'name': 123,
+ 'severity': 'INFO'}]}))
+ self.assertEqual('123 should be a string',
+ b10logging.check({'loggers':
+ [{'name': 'bind10',
+ 'severity': 123}]}))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py b/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py
new file mode 100644
index 0000000..808f28a
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Make sure we can load the module, put it into path
+import sys
+import os
+sys.path.extend(os.environ["B10_TEST_PLUGIN_DIR"].split(':'))
+
+import tsig_keys
+import unittest
+import isc.config.module_spec
+
class TSigKeysTest(unittest.TestCase):
    """Tests for the tsig_keys configuration plugin."""

    def test_load(self):
        """The entry point hands back the module spec and the checker."""
        (spec, check) = tsig_keys.load()
        self.assertEqual(check, tsig_keys.check)
        self.assertEqual(spec, tsig_keys.spec)

    def test_spec(self):
        """Sanity-check the spec (no deep inspection here)."""
        spec = tsig_keys.spec
        # Duck typing is fine in production code, but this unit test pins
        # down the exact class: a look-alike impostor is not our intention.
        self.assertTrue(isinstance(spec, isc.config.module_spec.ModuleSpec))
        self.assertEqual("tsig_keys", spec.get_module_name())
        # No commands are defined; nobody would handle them anyway.
        self.assertEqual([], spec.get_commands_spec())
        # Some non-empty configuration must be present.
        self.assertNotEqual({}, spec.get_config_spec())

    def test_missing_keys(self):
        """A config without any 'keys' entry is accepted as-is."""
        self.assertEqual(None, tsig_keys.check({}))

    def test_data_empty(self):
        """An empty list of TSIG keys is valid configuration."""
        self.assertEqual(None, tsig_keys.check({'keys': []}))

    def test_keys_valid(self):
        """
        Some valid keys pass the check (covering every algorithm is the
        job of isc.dns.TSIGKey, not of this plugin).
        """
        valid = {'keys': ['testkey:QklORCAxMCBpcyBjb29sCg==',
                          'test.key:QklORCAxMCBpcyBjb29sCg==:hmac-sha1']}
        self.assertEqual(None, tsig_keys.check(valid))

    def test_keys_same_name(self):
        """Two keys sharing a single name are rejected."""
        duplicated = {'keys': ['test.key:QklORCAxMCBpcyBjb29sCg==',
                               'test.key:b3RoZXIK']}
        self.assertEqual("Multiple TSIG keys with name 'test.key.'",
                         tsig_keys.check(duplicated))

    def test_invalid_key(self):
        """Keys that TSIGKey itself refuses are reported with a prefix."""
        self.assertEqual("TSIG: Invalid TSIG key string: invalid.key",
                         tsig_keys.check({'keys': ['invalid.key']}))
        self.assertEqual(
            "TSIG: Unexpected end of input in BASE decoder",
            tsig_keys.check({'keys': ['invalid.key:123']}))

    def test_bad_format(self):
        """
        Structurally bad data fails the check. The exact message does not
        matter much here; such data should not get through the config
        manager anyway.
        """
        for bad in ({'bad_name': {}},
                    {'keys': 'not_list'},
                    {'keys': 42},
                    {'keys': {}}):
            self.assertNotEqual(None, tsig_keys.check(bad))
+
if __name__ == '__main__':
    # Allow this test module to be run directly from the command line.
    unittest.main()
diff --git a/src/bin/cfgmgr/plugins/tsig_keys.py b/src/bin/cfgmgr/plugins/tsig_keys.py
new file mode 100644
index 0000000..d57e645
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tsig_keys.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This is the plugin for tsig_keys configuration section. The TSIG keyring
+# lives there (eg. all the shared secrets, with some exceptions where there
+# are some TSIG keys elsewhere, but these should be removed soon). We do
+# sanity checking of user configuration here, simply by trying to construct
+# all the keys here.
+
+from isc.config.module_spec import module_spec_from_file
+from isc.util.file import path_search
+from pydnspp import TSIGKey, InvalidParameter
+from bind10_config import PLUGIN_PATHS
+spec = module_spec_from_file(path_search('tsig_keys.spec', PLUGIN_PATHS))
+
def check(config):
    """Validate the tsig_keys configuration section.

    Returns None when the configuration is acceptable, or a string
    describing the first problem found otherwise.
    """
    # Structural validation against the module spec comes first.
    problems = []
    if not spec.validate_config(False, config, problems):
        return ' '.join(problems)
    # Try to construct every key and watch for duplicate key names.
    seen_names = set()
    for key_str in config.get('keys', []):
        try:
            name = str(TSIGKey(key_str).get_key_name())
        except InvalidParameter as ex:
            return "TSIG: " + str(ex)
        if name in seen_names:
            return "Multiple TSIG keys with name '" + name + "'"
        seen_names.add(name)
    # Nothing wrong was found, so assume the configuration is OK.
    return None
+
def load():
    """Plugin entry point.

    Returns the module spec together with the configuration checker so
    the config manager can register both.
    """
    return (spec, check)
diff --git a/src/bin/cfgmgr/plugins/tsig_keys.spec b/src/bin/cfgmgr/plugins/tsig_keys.spec
new file mode 100644
index 0000000..e558dd2
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tsig_keys.spec
@@ -0,0 +1,21 @@
+{
+ "module_spec": {
+ "module_name": "tsig_keys",
+ "module_description": "The TSIG keyring is stored here",
+ "config_data": [
+ {
+ "item_name": "keys",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "key",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ }
+ }
+ ],
+ "commands": []
+ }
+}
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index 68666e6..a2e43ff 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -1,7 +1,15 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-cfgmgr_test.py
-EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
+noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/plugins/testplugin.py
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
@@ -12,7 +20,14 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env TESTDATA_PATH=$(abs_srcdir)/testdata \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ TESTDATA_PATH=$(abs_srcdir)/testdata \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
+
+CLEANDIRS = testdata/plugins/__pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
index 37cd0f5..ea5fc8b 100644
--- a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
+++ b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
@@ -20,6 +20,7 @@
import unittest
import os
import sys
+import bind10_config
from isc.testutils.parse_args import OptsError, TestOptParser
class MyConfigManager:
@@ -110,6 +111,7 @@ class TestConfigManagerStartup(unittest.TestCase):
env_var = os.environ["B10_FROM_SOURCE"]
os.environ["B10_FROM_SOURCE"] = tmp_env_var
+ bind10_config.reload()
b = __import__("b10-cfgmgr", globals(), locals())
b.PLUGIN_PATH = [] # It's enough to test plugins in one test
b.ConfigManager = MyConfigManager
@@ -117,6 +119,7 @@ class TestConfigManagerStartup(unittest.TestCase):
if env_var != None:
os.environ["B10_FROM_SOURCE"] = env_var
+ bind10_config.reload()
sys.modules.pop("b10-cfgmgr")
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index 04cf5e2..e302fa6 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -4,6 +4,9 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-cmdctl
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
b10_cmdctldir = $(pkgdatadir)
# NOTE: this will overwrite on install
@@ -18,10 +21,12 @@ b10_cmdctl_DATA += cmdctl.spec
EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
-CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.pyc
man_MANS = b10-cmdctl.8
-EXTRA_DIST += $(man_MANS) b10-cmdctl.xml
+EXTRA_DIST += $(man_MANS) b10-cmdctl.xml cmdctl_messages.mes
if ENABLE_MAN
@@ -33,20 +38,30 @@ endif
cmdctl.spec: cmdctl.spec.pre
$(SED) -e "s|@@SYSCONFDIR@@|$(sysconfdir)|" cmdctl.spec.pre >$@
+$(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
chmod a+x $@
if INSTALL_CONFIGURATIONS
-# TODO: permissions handled later
+# Below we intentionally use ${INSTALL} -m 640 instead of $(INSTALL_DATA)
+# because these file will contain sensitive information.
install-data-local:
$(mkinstalldirs) $(DESTDIR)/@sysconfdir@/@PACKAGE@
for f in $(CMDCTL_CONFIGURATIONS) ; do \
if test ! -f $(DESTDIR)$(sysconfdir)/@PACKAGE@/$$f; then \
- $(INSTALL_DATA) $(srcdir)/$$f $(DESTDIR)$(sysconfdir)/@PACKAGE@/ ; \
+ ${INSTALL} -m 640 $(srcdir)/$$f $(DESTDIR)$(sysconfdir)/@PACKAGE@/ ; \
fi ; \
done
endif
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index f1c1021..fcd69b8 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -47,6 +47,18 @@ import isc.net.parse
from optparse import OptionParser, OptionValueError
from hashlib import sha1
from isc.util import socketserver_mixin
+from isc.log_messages.cmdctl_messages import *
+
+# TODO: these debug-levels are hard-coded here; we are planning on
+# creating a general set of debug levels, see ticket #1074. When done,
+# we should remove these values and use the general ones in the
+# logger.debug calls
+
+# Debug level for communication with BIND10
+DBG_CMDCTL_MESSAGING = 30
+
+isc.log.init("b10-cmdctl")
+logger = isc.log.Logger("cmdctl")
try:
import threading
@@ -173,7 +185,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
if not user_name:
return False, ["need user name"]
if not self.server.get_user_info(user_name):
- return False, ["user doesn't exist"]
+ logger.info(CMDCTL_NO_SUCH_USER, user_name)
+ return False, ["username or password error"]
user_pwd = user_info.get('password')
if not user_pwd:
@@ -181,7 +194,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
local_info = self.server.get_user_info(user_name)
pwd_hashval = sha1((user_pwd + local_info[1]).encode())
if pwd_hashval.hexdigest() != local_info[0]:
- return False, ["password doesn't match"]
+ logger.info(CMDCTL_BAD_PASSWORD, user_name)
+ return False, ["username or password error"]
return True, None
@@ -281,7 +295,7 @@ class CommandControl():
errstr = 'unknown config item: ' + key
if errstr != None:
- self.log_info('Fail to apply config data, ' + errstr)
+ logger.error(CMDCTL_BAD_CONFIG_DATA, errstr);
return ccsession.create_answer(1, errstr)
return ccsession.create_answer(0)
@@ -387,8 +401,8 @@ class CommandControl():
'''Send the command from bindctl to proper module. '''
errstr = 'unknown error'
answer = None
- if self._verbose:
- self.log_info("Begin send command '%s' to module '%s'" %(command_name, module_name))
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_SEND_COMMAND,
+ command_name, module_name)
if module_name == self._module_name:
# Process the command sent to cmdctl directly.
@@ -396,15 +410,14 @@ class CommandControl():
else:
msg = ccsession.create_command(command_name, params)
seq = self._cc.group_sendmsg(msg, module_name)
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_COMMAND_SENT,
+ command_name, module_name)
#TODO, it may be blocked, msqg need to add a new interface waiting in timeout.
try:
answer, env = self._cc.group_recvmsg(False, seq)
except isc.cc.session.SessionTimeout:
errstr = "Module '%s' not responding" % module_name
- if self._verbose:
- self.log_info("Finish send command '%s' to module '%s'" % (command_name, module_name))
-
if answer:
try:
rcode, arg = ccsession.parse_answer(answer)
@@ -415,16 +428,13 @@ class CommandControl():
else:
return rcode, {}
else:
- # TODO: exception
errstr = str(answer['result'][1])
except ccsession.ModuleCCSessionError as mcse:
errstr = str("Error in ccsession answer:") + str(mcse)
- self.log_info(errstr)
+
+ logger.error(CMDCTL_COMMAND_ERROR, command_name, module_name, errstr)
return 1, {'error': errstr}
- def log_info(self, msg):
- sys.stdout.write("[b10-cmdctl] %s\n" % str(msg))
-
def get_cmdctl_config_data(self):
''' If running in source code tree, use keyfile, certificate
and user accounts file in source code. '''
@@ -481,14 +491,15 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
for row in reader:
self._user_infos[row[0]] = [row[1], row[2]]
except (IOError, IndexError) as e:
- self.log_info("Fail to read user database, %s" % e)
+ logger.error(CMDCTL_USER_DATABASE_READ_ERROR,
+ accounts_file, e)
finally:
if csvfile:
csvfile.close()
self._accounts_file = accounts_file
if len(self._user_infos) == 0:
- self.log_info("Fail to get user information, will deny any user")
+ logger.error(CMDCTL_NO_USER_ENTRIES_READ)
def get_user_info(self, username):
'''Get user's salt and hashed string. If the user
@@ -520,7 +531,7 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
ssl_version = ssl.PROTOCOL_SSLv23)
return ssl_sock
except (ssl.SSLError, CmdctlException) as err :
- self.log_info("Deny client's connection because %s" % str(err))
+ logger.info(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
self.close_request(sock)
# raise socket error to finish the request
raise socket.error
@@ -547,9 +558,6 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def send_command_to_module(self, module_name, command_name, params):
return self.cmdctl.send_command_with_check(module_name, command_name, params)
- def log_info(self, msg):
- sys.stdout.write("[b10-cmdctl] %s\n" % str(msg))
-
httpd = None
def signal_handler(signal, frame):
@@ -607,15 +615,13 @@ if __name__ == '__main__':
run(options.addr, options.port, options.idle_timeout, options.verbose)
result = 0
except isc.cc.SessionError as err:
- sys.stderr.write("[b10-cmdctl] Error creating b10-cmdctl, "
- "is the command channel daemon running?\n")
+ logger.fatal(CMDCTL_CC_SESSION_ERROR, err)
except isc.cc.SessionTimeout:
- sys.stderr.write("[b10-cmdctl] Error creating b10-cmdctl, "
- "is the configuration manager running?\n")
+ logger.fatal(CMDCTL_CC_SESSION_TIMEOUT)
except KeyboardInterrupt:
- sys.stderr.write("[b10-cmdctl] exit from Cmdctl\n")
+ logger.info(CMDCTL_STOPPED_BY_KEYBOARD)
except CmdctlException as err:
- sys.stderr.write("[b10-cmdctl] " + str(err) + "\n")
+ logger.fatal(CMDCTL_UNCAUGHT_EXCEPTION, err);
if httpd:
httpd.shutdown()
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
new file mode 100644
index 0000000..e007296
--- /dev/null
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -0,0 +1,81 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the cmdctl_messages python module.
+
+% CMDCTL_BAD_CONFIG_DATA error in config data: %1
+There was an error reading the updated configuration data. The specific
+error is printed.
+
+% CMDCTL_BAD_PASSWORD bad password for user: %1
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+
+% CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+
+% CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+
+% CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent
+This debug message indicates that the given command has been sent to
+the given module.
+
+% CMDCTL_NO_SUCH_USER username not found in user database: %1
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_SEND_COMMAND sending command %1 to module %2
+This debug message indicates that the given command is being sent to
+the given module.
+
+% CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+
+% CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+
+% CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+
+% CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
diff --git a/src/bin/cmdctl/run_b10-cmdctl.sh.in b/src/bin/cmdctl/run_b10-cmdctl.sh.in
index 6a519e1..7e63249 100644
--- a/src/bin/cmdctl/run_b10-cmdctl.sh.in
+++ b/src/bin/cmdctl/run_b10-cmdctl.sh.in
@@ -19,9 +19,17 @@ PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
CMD_CTRLD_PATH=@abs_top_builddir@/src/bin/cmdctl
-PYTHONPATH=@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 6a4d7d4..89d89ea 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = cmdctl_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,7 +18,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/cmdctl/tests/cmdctl_test.py b/src/bin/cmdctl/tests/cmdctl_test.py
index 5463c36..3103f47 100644
--- a/src/bin/cmdctl/tests/cmdctl_test.py
+++ b/src/bin/cmdctl/tests/cmdctl_test.py
@@ -19,6 +19,7 @@ import socket
import tempfile
import sys
from cmdctl import *
+import isc.log
SPEC_FILE_PATH = '..' + os.sep
if 'CMDCTL_SPEC_PATH' in os.environ:
@@ -173,7 +174,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
self.handler.server._user_infos['root'] = ['aa', 'aaa']
ret, msg = self.handler._check_user_name_and_pwd()
self.assertFalse(ret)
- self.assertEqual(msg, ['password doesn\'t match'])
+ self.assertEqual(msg, ['username or password error'])
def test_check_user_name_and_pwd_2(self):
user_info = {'username':'root', 'password':'abc123'}
@@ -214,7 +215,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
ret, msg = self.handler._check_user_name_and_pwd()
self.assertFalse(ret)
- self.assertEqual(msg, ['user doesn\'t exist'])
+ self.assertEqual(msg, ['username or password error'])
def test_do_POST(self):
self.handler.headers = {}
@@ -447,6 +448,7 @@ class TestFuncNotInClass(unittest.TestCase):
if __name__== "__main__":
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/host/Makefile.am b/src/bin/host/Makefile.am
index 0758cb9..a8f96c2 100644
--- a/src/bin/host/Makefile.am
+++ b/src/bin/host/Makefile.am
@@ -10,17 +10,21 @@ endif
CLEANFILES = *.gcno *.gcda
-bin_PROGRAMS = host
-host_SOURCES = host.cc
-host_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
-host_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-
-#man_MANS = host.1
-#EXTRA_DIST = $(man_MANS) host.xml
-#
-#if ENABLE_MAN
-#
-#host.1: host.xml
-# xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/host.xml
-#
-#endif
+bin_PROGRAMS = b10-host
+b10_host_SOURCES = host.cc
+b10_host_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+b10_host_LDADD += $(top_builddir)/src/lib/util/libutil.la
+b10_host_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
+man_MANS = b10-host.1
+EXTRA_DIST = $(man_MANS) b10-host.xml
+
+.PHONY: man
+if ENABLE_MAN
+
+man: b10-host.1
+
+b10-host.1: b10-host.xml
+ xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-host.xml
+
+endif
diff --git a/src/bin/host/README b/src/bin/host/README
index d493a95..5cc4068 100644
--- a/src/bin/host/README
+++ b/src/bin/host/README
@@ -1,14 +1,4 @@
-Rewriting host(1) in C++ from scratch using BIND 10's libdns.
+Rewriting host(1) in C++ from scratch using BIND 10's libdns++.
-Initial functionality:
-
- host _hostname_ [server]
-
-By default, it looks up the A, AAAA, and MX record sets.
-
-Note it doesn't use /etc/resolv.conf at this time.
-The default name server used is 127.0.0.1.
-
- -r disable recursive processing
- -t _type_ specific query type
- -v enable verbose output mode, including elapsed time
+The bugs and incompatibilities are listed in the manual page
+and in the source code.
diff --git a/src/bin/host/b10-host.1 b/src/bin/host/b10-host.1
new file mode 100644
index 0000000..050f6a3
--- /dev/null
+++ b/src/bin/host/b10-host.1
@@ -0,0 +1,118 @@
+'\" t
+.\" Title: b10-host
+.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: May 4, 2011
+.\" Manual: BIND10
+.\" Source: BIND10
+.\" Language: English
+.\"
+.TH "B10\-HOST" "1" "May 4, 2011" "BIND10" "BIND10"
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+b10-host \- DNS lookup utility
+.SH "SYNOPSIS"
+.HP \w'\fBb10\-host\fR\ 'u
+\fBb10\-host\fR [\fB\-a\fR] [\fB\-c\ \fR\fB\fIclass\fR\fR] [\fB\-d\fR] [\fB\-p\ \fR\fB\fIport\fR\fR] [\fB\-r\fR] [\fB\-t\ \fR\fB\fItype\fR\fR] [\fB\-v\fR] [\fIname\fR] [\fB\fIserver\fR\fR]
+.SH "DESCRIPTION"
+.PP
+The
+\fBb10\-host\fR
+utility does DNS lookups\&. Its initial goal is to be a
+\fBhost\fR(1)
+clone, but also add a few features useful for BIND 10 development testing\&.
+.PP
+By default, it looks up the A, AAAA, and MX record sets for the
+\fIname\fR\&. Optionally, you may select a name server to query against by adding the
+\fIserver\fR
+argument\&.
+.SH "OPTIONS"
+.PP
+The arguments are as follows:
+.PP
+\fB\-a\fR
+.RS 4
+Enable verbose mode and do a query for type ANY\&. (If the
+\fB\-t\fR
+option is also set, then the ANY query is not done, but it still uses verbose mode\&.)
+.RE
+.PP
+\fB\-c \fR\fB\fIclass\fR\fR
+.RS 4
+Define the class for the query\&. The default is IN (Internet)\&.
+.RE
+.PP
+\fB\-d\fR
+.RS 4
+Enable verbose output mode, including elapsed time in milliseconds\&. Verbose mode shows the header, question, answer, authority, and additional sections (if provided)\&. (Same as
+\fB\-v\fR\&.)
+.RE
+.PP
+\fB\-p \fR\fB\fIport\fR\fR
+.RS 4
+Select an alternative port for the query\&. This may be a number or a service name\&. The default is 53 (domain)\&. This is not a standard feature of
+\fBhost\fR(1)\&.
+.RE
+.PP
+\fB\-r\fR
+.RS 4
+Disable recursive processing by not setting the Recursion Desired flag in the query\&.
+.RE
+.PP
+\fB\-t \fR\fB\fItype\fR\fR
+.RS 4
+Select a specific resource record type for the query\&. By default, it looks up the A, AAAA, and MX record sets\&.
+(This overrides the
+\fB\-a\fR
+option\&.)
+.RE
+.PP
+\fB\-v\fR
+.RS 4
+Same as
+\fB\-d\fR
+option\&.
+.RE
+.SH "COMPATIBILITY / BUGS"
+.PP
+
+\fBb10\-host\fR
+does not do reverse lookups by default yet (by detecting if name is a IPv4 or IPv6 address)\&.
+.PP
+Unknown
+\fB\-c\fR
+class or
+\fB\-t\fR
+type causes
+\fBb10\-host\fR
+to Abort\&.
+.PP
+Not all types are supported yet for formatting\&. Not all switches are supported yet\&.
+.PP
+It doesn\'t use
+/etc/resolv\&.conf
+at this time\&. The default name server used is 127\&.0\&.0\&.1\&.
+.PP
+
+\fB\-p\fR
+is not a standard feature\&.
+.SH "HISTORY"
+.PP
+The C++ version of
+\fBb10\-host\fR
+was started in October 2009 by Jeremy C\&. Reed of ISC\&. Its usage and output were based on the standard
+\fBhost\fR
+command\&.
+.SH "COPYRIGHT"
+.br
+Copyright \(co 2011 Internet Systems Consortium, Inc. ("ISC")
+.br
diff --git a/src/bin/host/b10-host.xml b/src/bin/host/b10-host.xml
new file mode 100644
index 0000000..a17ef67
--- /dev/null
+++ b/src/bin/host/b10-host.xml
@@ -0,0 +1,196 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+ [<!ENTITY mdash "—">]>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<!-- $Id$ -->
+<refentry>
+
+ <refentryinfo>
+ <date>May 4, 2011</date>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>b10-host</refentrytitle>
+ <manvolnum>1</manvolnum>
+ <refmiscinfo>BIND10</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>b10-host</refname>
+ <refpurpose>DNS lookup utility</refpurpose>
+ </refnamediv>
+
+ <docinfo>
+ <copyright>
+ <year>2011</year>
+ <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+ </copyright>
+ </docinfo>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>b10-host</command>
+ <arg><option>-a</option></arg>
+ <arg><option>-c <replaceable>class</replaceable></option></arg>
+ <arg><option>-d</option></arg>
+ <arg><option>-p <replaceable>port</replaceable></option></arg>
+ <arg><option>-r</option></arg>
+ <arg><option>-t <replaceable>type</replaceable></option></arg>
+ <arg><option>-v</option></arg>
+ <arg><replaceable>name</replaceable></arg>
+ <arg><option><replaceable>server</replaceable></option></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>DESCRIPTION</title>
+ <para>
+ The <command>b10-host</command> utility does DNS lookups.
+ Its initial goal is to be a
+ <citerefentry><refentrytitle>host</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry>
+ clone, but also add a few features useful for BIND 10 development
+ testing.
+ </para>
+
+ <para>
+ By default, it looks up the A, AAAA, and MX record sets for the
+ <replaceable>name</replaceable>.
+ Optionally, you may select a name server to query against by adding
+ the <replaceable>server</replaceable> argument.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>OPTIONS</title>
+
+ <para>The arguments are as follows:</para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><option>-a</option></term>
+ <listitem><para>
+ Enable verbose mode and do a query for type ANY.
+ (If the <option>-t</option> option is also set, then the
+ ANY query is not done, but it still uses verbose mode.)
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-c <replaceable>class</replaceable></option></term>
+ <listitem><para>
+ Define the class for the query.
+ The default is IN (Internet).
+<!-- TODO: bug if class is unknown causes seg fault and possible core dump -->
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-d</option></term>
+ <listitem><para>
+ Enable verbose output mode, including elapsed time in
+ milliseconds.
+ Verbose mode shows the header, question, answer, authority,
+ and additional sections (if provided).
+ (Same as <option>-v</option>.)
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-p <replaceable>port</replaceable></option></term>
+ <listitem><para>
+ Select an alternative port for the query.
+ This may be a number or a service name.
+ The default is 53 (domain).
+ This is not a standard feature of
+ <citerefentry><refentrytitle>host</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-r</option></term>
+ <listitem><para>
+ Disable recursive processing by not setting the
+ Recursion Desired flag in the query.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-t <replaceable>type</replaceable></option></term>
+ <listitem><para>
+ Select a specific resource record type for the query.
+ By default, it looks up the A, AAAA, and MX record sets.
+<!-- TODO: bug if type is unknown causes seg fault and possible core dump -->
+ (This overrides the <option>-a</option> option.)
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-v</option></term>
+ <listitem><para>
+ Same as <option>-d</option> option.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
+ <title>COMPATIBILITY / BUGS</title>
+ <para>
+ <command>b10-host</command> does not do reverse lookups by
+ default yet (by detecting if name is an IPv4 or IPv6 address).
+ </para>
+
+ <para>
+ Unknown <option>-c</option> class or <option>-t</option> type
+ causes <command>b10-host</command> to abort.
+ </para>
+
+ <para>
+ Not all types are supported yet for formatting.
+ Not all switches are supported yet.
+ </para>
+
+ <para>
+ It doesn't use <filename>/etc/resolv.conf</filename> at this time.
+ The default name server used is 127.0.0.1.
+ </para>
+
+ <para>
+ <option>-p</option> is not a standard feature.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>HISTORY</title>
+ <para>
+ The C++ version of <command>b10-host</command> was started in
+ October 2009 by Jeremy C. Reed of ISC.
+ Its usage and output were based on the standard <command>host</command>
+ command.
+ </para>
+ </refsect1>
+</refentry><!--
+ - Local variables:
+ - mode: sgml
+ - End:
+-->
diff --git a/src/bin/host/host.cc b/src/bin/host/host.cc
index 973509e..fb9f61e 100644
--- a/src/bin/host/host.cc
+++ b/src/bin/host/host.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -14,17 +14,24 @@
// host rewritten in C++ using BIND 10 DNS library
+#include <config.h>
+
+#ifdef _WIN32
+#include <getopt.h>
+#include <ws2tcpip.h>
+#else
#include <arpa/inet.h>
#include <netdb.h> // for getaddrinfo
-#include <sys/time.h> // for gettimeofday
#include <sys/socket.h> // networking functions and definitions on FreeBSD
#include <unistd.h>
+#endif
#include <string>
#include <iostream>
#include <util/buffer.h>
+#include <util/time_utilities.h>
#include <dns/name.h>
#include <dns/message.h>
@@ -44,13 +51,16 @@ namespace {
char* dns_type = NULL; // not set, so A, AAAA, MX
const char* server = "127.0.0.1";
const char* server_port = "53";
-int verbose = 0;
-int first_time = 1;
-bool recursive_bit = true;
-struct timeval before_time, after_time;
+const char* dns_class = "IN";
+bool verbose = false;
+bool dns_any = false;
+int first_time = 1;
+bool recursive_bit = true;
+int64_t before_time, after_time;
int
-host_lookup(const char* const name, const char* const type) {
+host_lookup(const char* const name, const char* const dns_class,
+ const char* const type, bool any) {
Message msg(Message::RENDER);
@@ -64,8 +74,8 @@ host_lookup(const char* const name, const char* const type) {
}
msg.addQuestion(Question(Name(name),
- RRClass::IN(), // IN class only for now
- RRType(type))); // if NULL then:
+ RRClass(dns_class),
+ any ? RRType::ANY() : RRType(type))); // if NULL then:
OutputBuffer obuffer(512);
MessageRenderer renderer(obuffer);
@@ -105,11 +115,11 @@ host_lookup(const char* const name, const char* const type) {
}
if (verbose) {
- gettimeofday(&before_time, NULL);
+ before_time = detail::gettimeWrapper();
}
- sendto(s, obuffer.getData(), obuffer.getLength(), 0, res->ai_addr,
- res->ai_addrlen);
+ sendto(s, (const char *) obuffer.getData(), obuffer.getLength(),
+ 0, res->ai_addr, res->ai_addrlen);
struct sockaddr_storage ss;
struct sockaddr* sa;
@@ -127,45 +137,55 @@ host_lookup(const char* const name, const char* const type) {
rmsg.fromWire(ibuffer);
if (!verbose) {
+ string description = "";
for (RRsetIterator it =
rmsg.beginSection(Message::SECTION_ANSWER);
it != rmsg.endSection(Message::SECTION_ANSWER);
++it) {
- if ((*it)->getType() != RRType::A()) {
- continue;
+
+ if ((*it)->getType() == RRType::A()) {
+ description = "has address";
+ }
+ else if ((*it)->getType() == RRType::AAAA()) {
+ description = "has IPv6 address";
+ }
+ else if ((*it)->getType() == RRType::MX()) {
+ description = "mail is handled by";
+ }
+ else if ((*it)->getType() == RRType::TXT()) {
+ description = "descriptive text";
}
RdataIteratorPtr rit = (*it)->getRdataIterator();
for (; !rit->isLast(); rit->next()) {
// instead of using my name, maybe use returned label?
- cout << name << " has address " <<
+ cout << name << " " << description << " " <<
(*rit).getCurrent().toText() << endl;
}
}
} else {
- gettimeofday(&after_time, NULL);
+ after_time = detail::gettimeWrapper();
// HEADER and QUESTION, ANSWER, AUTHORITY, and ADDITIONAL
std::cout << rmsg.toText() << std::endl;
- if (before_time.tv_usec > after_time.tv_usec) {
- after_time.tv_usec += 1000000;
- --after_time.tv_sec;
- }
-
- int elapsed_time =
- (after_time.tv_sec - before_time.tv_sec)
- + ((after_time.tv_usec - before_time.tv_usec))/1000;
+ int elapsed_time = (int) (after_time - before_time);
// TODO: if NXDOMAIN, host(1) doesn't show HEADER
// Host hsdjkfhksjhdfkj not found: 3(NXDOMAIN)
- // TODO: figure out the new libdns way to test if NXDOMAIN
+ // TODO: test if NXDOMAIN
std::cout << "Received " << cc <<
" bytes in " << elapsed_time << " ms\n";
// TODO: " bytes from 127.0.0.1#53 in 0 ms
} //verbose
+/*
+TODO: handle InvalidRRClass
+TODO: handle invalid type exception
+ } catch (InvalidType ivt) {
+ std::cerr << "invalid type:" << ivt.what();
+*/
} catch (const exception& ex) {
std::cerr << "parse failed for " <<
string(name) << "/" << type << ": " << ex.what() << std::endl;
@@ -184,26 +204,36 @@ int
main(int argc, char* argv[]) {
int c;
- while ((c = getopt(argc, argv, "p:rt:v")) != -1)
+ while ((c = getopt(argc, argv, "ac:dp:rt:v")) != -1)
switch (c) {
+ case 'a':
+ dns_any = true;
+ verbose = true;
+ break;
+ case 'c':
+ dns_class = optarg;
+ break;
+ // p for port is a non-standard switch
+ case 'p':
+ server_port = optarg;
+ break;
case 'r':
recursive_bit = false;
break;
case 't':
dns_type = optarg;
break;
- case 'p':
- server_port = optarg;
- break;
+ case 'd':
+ // drop through to v, because debug and verbose are equivalent
case 'v':
- verbose = 1;
+ verbose = true;
break;
}
argc -= optind;
argv += optind;
if (argc < 1) {
- cout << "Usage: host [-vr] [-t type] hostname [server]\n";
+ cout << "Usage: host [-adprv] [-c class] [-t type] hostname [server]\n";
exit(1);
}
@@ -211,13 +241,24 @@ main(int argc, char* argv[]) {
server = argv[1];
}
+#ifdef _WIN32
+ WSADATA wsaData;
+ WSAStartup(MAKEWORD(2,2), &wsaData);
+#endif
+
if (dns_type == NULL) {
- host_lookup(argv[0], "A");
+ host_lookup(argv[0], dns_class, "A", dns_any);
// TODO: don't do next if A doesn't exist
- host_lookup(argv[0], "AAAA");
- host_lookup(argv[0], "MX");
+ host_lookup(argv[0], dns_class, "AAAA", dns_any);
+ host_lookup(argv[0], dns_class, "MX", dns_any);
} else {
- host_lookup(argv[0], dns_type);
+ // -t overrides -a, regardless of order
+ host_lookup(argv[0], dns_class, dns_type, false);
}
+
+#ifdef _WIN32
+ WSACleanup();
+#endif
+
return (0);
}
diff --git a/src/bin/loadzone/Makefile.am b/src/bin/loadzone/Makefile.am
index 74d4dd4..a235d68 100644
--- a/src/bin/loadzone/Makefile.am
+++ b/src/bin/loadzone/Makefile.am
@@ -1,5 +1,6 @@
SUBDIRS = . tests/correct tests/error
bin_SCRIPTS = b10-loadzone
+noinst_SCRIPTS = run_loadzone.sh
CLEANFILES = b10-loadzone
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
old mode 100644
new mode 100755
index b7ac19f..43b7920
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -18,9 +18,17 @@
PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index a90cab2..fb882ba 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,8 +13,17 @@ EXTRA_DIST += ttl2.db
EXTRA_DIST += ttlext.db
EXTRA_DIST += example.db
+noinst_SCRIPTS = correct_test.sh
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# TODO: maybe use TESTS?
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
echo Running test: correct_test.sh
- $(SHELL) $(abs_builddir)/correct_test.sh
+ $(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/correct_test.sh
diff --git a/src/bin/loadzone/tests/correct/correct_test.sh.in b/src/bin/loadzone/tests/correct/correct_test.sh.in
old mode 100644
new mode 100755
index 509d8e5..d944451
--- a/src/bin/loadzone/tests/correct/correct_test.sh.in
+++ b/src/bin/loadzone/tests/correct/correct_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index bbeec07..03263b7 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,8 +12,17 @@ EXTRA_DIST += keyerror3.db
EXTRA_DIST += originerr1.db
EXTRA_DIST += originerr2.db
+noinst_SCRIPTS = error_test.sh
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# TODO: use TESTS ?
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
echo Running test: error_test.sh
- $(SHELL) $(abs_builddir)/error_test.sh
+ $(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/error_test.sh
diff --git a/src/bin/loadzone/tests/error/error_test.sh.in b/src/bin/loadzone/tests/error/error_test.sh.in
old mode 100644
new mode 100755
index d1d6bd1..94c5edb
--- a/src/bin/loadzone/tests/error/error_test.sh.in
+++ b/src/bin/loadzone/tests/error/error_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/msgq/Makefile.am b/src/bin/msgq/Makefile.am
index 61d4f23..0eebf00 100644
--- a/src/bin/msgq/Makefile.am
+++ b/src/bin/msgq/Makefile.am
@@ -20,3 +20,8 @@ endif
b10-msgq: msgq.py
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" msgq.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 0bbb964..50b218b 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = msgq_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,7 +18,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/msgq \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/msgq/tests/msgq_test.py b/src/bin/msgq/tests/msgq_test.py
index f926845..fe4f7d4 100644
--- a/src/bin/msgq/tests/msgq_test.py
+++ b/src/bin/msgq/tests/msgq_test.py
@@ -132,7 +132,7 @@ class SendNonblock(unittest.TestCase):
task()
# If we got here, then everything worked well and in time
# In that case, we terminate successfully
- sys.exit(0) # needs exit code
+ os._exit(0) # needs exit code
else:
(pid, status) = os.waitpid(task_pid, 0)
self.assertEqual(0, status,
@@ -202,7 +202,7 @@ class SendNonblock(unittest.TestCase):
try:
def killall(signum, frame):
os.kill(queue_pid, signal.SIGTERM)
- sys.exit(1)
+ os._exit(1)
signal.signal(signal.SIGALRM, killall)
msg = msgq.preparemsg({"type" : "ping"}, data)
now = time.clock()
diff --git a/src/bin/resolver/Makefile.am b/src/bin/resolver/Makefile.am
index 094e3ad..3f5f049 100644
--- a/src/bin/resolver/Makefile.am
+++ b/src/bin/resolver/Makefile.am
@@ -18,10 +18,12 @@ endif
pkglibexecdir = $(libexecdir)/@PACKAGE@
-CLEANFILES = *.gcno *.gcda resolver.spec spec_config.h
+CLEANFILES = *.gcno *.gcda
+CLEANFILES += resolver.spec spec_config.h
+CLEANFILES += resolver_messages.cc resolver_messages.h
man_MANS = b10-resolver.8
-EXTRA_DIST = $(man_MANS) b10-resolver.xml
+EXTRA_DIST = $(man_MANS) b10-resolver.xml resolver_messages.mes
if ENABLE_MAN
@@ -36,16 +38,29 @@ resolver.spec: resolver.spec.pre
spec_config.h: spec_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
-BUILT_SOURCES = spec_config.h
+# Define rule to build logging source files from message file
+resolver_messages.h resolver_messages.cc: resolver_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/resolver/resolver_messages.mes
+
+
+BUILT_SOURCES = spec_config.h resolver_messages.cc resolver_messages.h
+
pkglibexec_PROGRAMS = b10-resolver
b10_resolver_SOURCES = resolver.cc resolver.h
+b10_resolver_SOURCES += resolver_log.cc resolver_log.h
b10_resolver_SOURCES += response_scrubber.cc response_scrubber.h
b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/change_user.h
b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/common.h
b10_resolver_SOURCES += main.cc
+
+nodist_b10_resolver_SOURCES = resolver_messages.cc resolver_messages.h
+
+
b10_resolver_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
b10_resolver_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_resolver_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/util/libutil.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
b10_resolver_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_resolver_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
b10_resolver_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 849092c..9161ec2 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -2,12 +2,12 @@
.\" Title: b10-resolver
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: February 17, 2011
+.\" Date: August 17, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "August 17, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -54,7 +54,7 @@ must be either a valid numeric user ID or a valid user name\&. By default the da
.PP
\fB\-v\fR
.RS 4
-Enabled verbose mode\&. This enables diagnostic messages to STDERR\&.
+Enable verbose mode\&. This sets logging to the maximum debugging level\&.
.RE
.SH "CONFIGURATION AND COMMANDS"
.PP
@@ -77,6 +77,25 @@ string and
number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
.PP
+
+
+
+
+
+\fIquery_acl\fR
+is a list of query access control rules\&. The list items are the
+\fIaction\fR
+string and the
+\fIfrom\fR
+or
+\fIkey\fR
+strings\&. The possible actions are ACCEPT, REJECT and DROP\&. The
+\fIfrom\fR
+is a remote (source) IPv4 or IPv6 address or special keyword\&. The
+\fIkey\fR
+is a TSIG key name\&. The default configuration accepts queries from 127\&.0\&.0\&.1 and ::1\&.
+.PP
+
\fIretries\fR
is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
.PP
@@ -88,7 +107,7 @@ to use directly as root servers to start resolving\&. The list items are the
\fIaddress\fR
string and
\fIport\fR
-number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+number\&. By default, a hardcoded address for l\&.root\-servers\&.net (199\&.7\&.83\&.42 or 2001:500:3::42) is used\&.
.PP
\fItimeout_client\fR
@@ -121,8 +140,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&. Caching was implemented in February 2011\&. Access control was introduced in June 2011\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..75cced7 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>February 17, 2011</date>
+ <date>August 17, 2011</date>
</refentryinfo>
<refmeta>
@@ -99,11 +99,14 @@
</listitem>
</varlistentry>
+<!-- TODO: this needs to be fixed as -v on command line
+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overridden by configuration or bindctl? -->
<varlistentry>
<term><option>-v</option></term>
<listitem><para>
- Enabled verbose mode. This enables diagnostic messages to
- STDERR.
+ Enable verbose mode.
+ This sets logging to the maximum debugging level.
</para></listitem>
</varlistentry>
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
</para>
<para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+ <varname>query_acl</varname> is a list of query access control
+ rules. The list items are the <varname>action</varname> string
+ and the <varname>from</varname> or <varname>key</varname> strings.
+ The possible actions are ACCEPT, REJECT and DROP.
+ The <varname>from</varname> is a remote (source) IPv4 or IPv6
+ address or special keyword.
+ The <varname>key</varname> is a TSIG key name.
+ The default configuration accepts queries from 127.0.0.1 and ::1.
+ </para>
+
+ <para>
<varname>retries</varname> is the number of times to retry
(resend query) after a query timeout
(<varname>timeout_query</varname>).
@@ -159,8 +178,10 @@ once that is merged you can for instance do 'config add Resolver/forward_address
root servers to start resolving.
The list items are the <varname>address</varname> string
and <varname>port</varname> number.
- If empty, a hardcoded address for F-root (192.5.5.241) is used.
+ By default, a hardcoded address for l.root-servers.net
+ (199.7.83.42 or 2001:500:3::42) is used.
</para>
+<!-- TODO: this is broken, see ticket #1184 -->
<para>
<varname>timeout_client</varname> is the number of milliseconds
@@ -234,7 +255,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
The <command>b10-resolver</command> daemon was first coded in
September 2010. The initial implementation only provided
forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+ Caching was implemented in February 2011.
+ Access control was introduced in June 2011.
<!-- TODO: document when validation was added -->
</para>
</refsect1>
diff --git a/src/bin/resolver/main.cc b/src/bin/resolver/main.cc
index 5103bf9..9b5faf6 100644
--- a/src/bin/resolver/main.cc
+++ b/src/bin/resolver/main.cc
@@ -14,11 +14,16 @@
#include <config.h>
+#ifdef _WIN32
+#include <getopt.h>
+#include <ws2tcpip.h>
+#else
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <netdb.h>
#include <netinet/in.h>
+#endif
#include <stdlib.h>
#include <errno.h>
@@ -43,8 +48,10 @@
#include <xfr/xfrout_client.h>
+#ifndef _WIN32
#include <auth/change_user.h>
#include <auth/common.h>
+#endif
#include <resolver/spec_config.h>
#include <resolver/resolver.h>
@@ -52,13 +59,14 @@
#include <cache/resolver_cache.h>
#include <nsas/nameserver_address_store.h>
-#include <log/dummylog.h>
+#include <log/logger_support.h>
+#include <log/logger_level.h>
+#include "resolver_log.h"
using namespace std;
using namespace isc::cc;
using namespace isc::config;
using namespace isc::data;
-using isc::log::dlog;
using namespace isc::asiodns;
using namespace isc::asiolink;
@@ -79,7 +87,7 @@ my_command_handler(const string& command, ConstElementPtr args) {
ConstElementPtr answer = createAnswer();
if (command == "print_message") {
- cout << args << endl;
+ LOG_INFO(resolver_logger, RESOLVER_PRINT_COMMAND).arg(args);
/* let's add that message to our answer as well */
answer = createAnswer(0, args);
} else if (command == "shutdown") {
@@ -100,7 +108,7 @@ usage() {
int
main(int argc, char* argv[]) {
- isc::log::dprefix = "b10-resolver";
+ bool verbose = false;
int ch;
const char* uid = NULL;
@@ -110,7 +118,7 @@ main(int argc, char* argv[]) {
uid = optarg;
break;
case 'v':
- isc::log::denabled = true;
+ verbose = true;
break;
case '?':
default:
@@ -122,13 +130,23 @@ main(int argc, char* argv[]) {
usage();
}
- if (isc::log::denabled) { // Show the command line
- string cmdline("Command line:");
- for (int i = 0; i < argc; ++ i) {
- cmdline = cmdline + " " + argv[i];
- }
- dlog(cmdline);
+#ifdef _WIN32
+ WSADATA wsaData;
+ WSAStartup(MAKEWORD(2,2), &wsaData);
+#endif
+
+ // Until proper logging comes along, initialize the logging with the
+ // temporary initLogger() code. If verbose, we'll use maximum verbosity.
+ isc::log::initLogger("b10-resolver",
+ (verbose ? isc::log::DEBUG : isc::log::INFO),
+ isc::log::MAX_DEBUG_LEVEL, NULL);
+
+ // Print the starting message
+ string cmdline = argv[0];
+ for (int i = 1; i < argc; ++ i) {
+ cmdline = cmdline + " " + argv[i];
}
+ LOG_INFO(resolver_logger, RESOLVER_STARTING).arg(cmdline);
int ret = 0;
@@ -144,7 +162,7 @@ main(int argc, char* argv[]) {
}
resolver = boost::shared_ptr<Resolver>(new Resolver());
- dlog("Server created.");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CREATED);
SimpleCallback* checkin = resolver->getCheckinProvider();
DNSLookup* lookup = resolver->getDNSLookupProvider();
@@ -197,33 +215,41 @@ main(int argc, char* argv[]) {
DNSService dns_service(io_service, checkin, lookup, answer);
resolver->setDNSService(dns_service);
- dlog("IOService created.");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_SERVICE_CREATED);
cc_session = new Session(io_service.get_io_service());
- dlog("Configuration session channel created.");
-
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
my_command_handler);
- dlog("Configuration channel established.");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_CHANNEL);
+#ifndef _WIN32
// FIXME: This does not belong here, but inside Boss
if (uid != NULL) {
changeUser(uid);
}
+#endif
resolver->setConfigSession(config_session);
- dlog("Config loaded");
+ // Install all initial configurations. If loading configuration
+ // fails, it will be logged, but we start the server anyway, giving
+ // the user a second chance to correct the configuration.
+ resolver->updateConfig(config_session->getFullConfig());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_LOADED);
- dlog("Server started.");
+ LOG_INFO(resolver_logger, RESOLVER_STARTED);
io_service.run();
} catch (const std::exception& ex) {
- dlog(string("Server failed: ") + ex.what(),true);
+ LOG_FATAL(resolver_logger, RESOLVER_FAILED).arg(ex.what());
ret = 1;
}
delete config_session;
delete cc_session;
+ LOG_INFO(resolver_logger, RESOLVER_SHUTDOWN);
+#ifdef _WIN32
+ WSACleanup();
+#endif
return (ret);
}
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index 591e214..d1ba0eb 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -14,18 +14,26 @@
#include <config.h>
+#include <stdint.h>
+#ifndef _WIN32
#include <netinet/in.h>
+#endif
#include <algorithm>
#include <vector>
#include <cassert>
+#include <boost/shared_ptr.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <acl/dns.h>
+#include <acl/loader.h>
+
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
-#include <boost/foreach.hpp>
-#include <boost/lexical_cast.hpp>
-
#include <config/ccsession.h>
#include <exceptions/exceptions.h>
@@ -41,24 +49,27 @@
#include <dns/rrttl.h>
#include <dns/message.h>
#include <dns/messagerenderer.h>
+
+#include <server_common/client.h>
#include <server_common/portconfig.h>
#include <resolve/recursive_query.h>
-#include <log/dummylog.h>
-
-#include <resolver/resolver.h>
+#include "resolver.h"
+#include "resolver_log.h"
using namespace std;
using namespace isc;
using namespace isc::util;
+using namespace isc::acl;
+using isc::acl::dns::RequestACL;
using namespace isc::dns;
using namespace isc::data;
using namespace isc::config;
-using isc::log::dlog;
using namespace isc::asiodns;
using namespace isc::asiolink;
+using namespace isc::server_common;
using namespace isc::server_common::portconfig;
class ResolverImpl {
@@ -73,6 +84,9 @@ public:
client_timeout_(4000),
lookup_timeout_(30000),
retries_(3),
+ // we apply "reject all" (implicit default of the loader) ACL by
+ // default:
+ query_acl_(acl::dns::getRequestLoader().load(Element::fromJSON("[]"))),
rec_query_(NULL)
{}
@@ -85,7 +99,7 @@ public:
isc::cache::ResolverCache& cache)
{
assert(!rec_query_); // queryShutdown must be called first
- dlog("Query setup");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUERY_SETUP);
rec_query_ = new RecursiveQuery(dnss,
nsas, cache,
upstream_,
@@ -101,7 +115,8 @@ public:
// (this is not a safety check, just to prevent logging of
// actions that are not performed
if (rec_query_) {
- dlog("Query shutdown");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT,
+ RESOLVER_QUERY_SHUTDOWN);
delete rec_query_;
rec_query_ = NULL;
}
@@ -113,13 +128,12 @@ public:
upstream_ = upstream;
if (dnss) {
if (!upstream_.empty()) {
- dlog("Setting forward addresses:");
BOOST_FOREACH(const AddressPair& address, upstream) {
- dlog(" " + address.first + ":" +
- boost::lexical_cast<string>(address.second));
+ LOG_INFO(resolver_logger, RESOLVER_FORWARD_ADDRESS)
+ .arg(address.first).arg(address.second);
}
} else {
- dlog("No forward addresses, running in recursive mode");
+ LOG_INFO(resolver_logger, RESOLVER_RECURSIVE);
}
}
}
@@ -130,13 +144,12 @@ public:
upstream_root_ = upstream_root;
if (dnss) {
if (!upstream_root_.empty()) {
- dlog("Setting root addresses:");
BOOST_FOREACH(const AddressPair& address, upstream_root) {
- dlog(" " + address.first + ":" +
- boost::lexical_cast<string>(address.second));
+ LOG_INFO(resolver_logger, RESOLVER_SET_ROOT_ADDRESS)
+ .arg(address.first).arg(address.second);
}
} else {
- dlog("No root addresses");
+ LOG_WARN(resolver_logger, RESOLVER_NO_ROOT_ADDRESS);
}
}
}
@@ -144,10 +157,20 @@ public:
void resolve(const isc::dns::QuestionPtr& question,
const isc::resolve::ResolverInterface::CallbackPtr& callback);
- void processNormalQuery(const Question& question,
- MessagePtr answer_message,
- OutputBufferPtr buffer,
- DNSServer* server);
+ enum NormalQueryResult { RECURSION, DROPPED, ERROR };
+ NormalQueryResult processNormalQuery(const IOMessage& io_message,
+ MessagePtr query_message,
+ MessagePtr answer_message,
+ OutputBufferPtr buffer,
+ DNSServer* server);
+
+ const RequestACL& getQueryACL() const {
+ return (*query_acl_);
+ }
+
+ void setQueryACL(boost::shared_ptr<const RequestACL> new_acl) {
+ query_acl_ = new_acl;
+ }
/// Currently non-configurable, but will be.
static const uint16_t DEFAULT_LOCAL_UDPSIZE = 4096;
@@ -172,6 +195,8 @@ public:
unsigned retries_;
private:
+ /// ACL on incoming queries
+ boost::shared_ptr<const RequestACL> query_acl_;
/// Object to handle upstream queries
RecursiveQuery* rec_query_;
@@ -186,8 +211,6 @@ class QuestionInserter {
public:
QuestionInserter(MessagePtr message) : message_(message) {}
void operator()(const QuestionPtr question) {
- dlog(string("Adding question ") + question->getName().toText() +
- " to message");
message_->addQuestion(question);
}
MessagePtr message_;
@@ -234,10 +257,6 @@ makeErrorMessage(MessagePtr message, MessagePtr answer_message,
message->setRcode(rcode);
MessageRenderer renderer(*buffer);
message->toWire(renderer);
-
- dlog(string("Sending an error response (") +
- boost::lexical_cast<string>(renderer.getLength()) + " bytes):\n" +
- message->toText());
}
// This is a derived class of \c DNSLookup, to serve as a
@@ -312,9 +331,9 @@ public:
answer_message->toWire(renderer);
- dlog(string("sending a response (") +
- boost::lexical_cast<string>(renderer.getLength()) + "bytes): \n" +
- answer_message->toText());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+ RESOLVER_DNS_MESSAGE_SENT)
+ .arg(renderer.getLength()).arg(*answer_message);
}
};
@@ -335,9 +354,12 @@ private:
Resolver::Resolver() :
impl_(new ResolverImpl()),
+ dnss_(NULL),
checkin_(new ConfigCheck(this)),
dns_lookup_(new MessageLookup(this)),
dns_answer_(new MessageAnswer),
+ nsas_(NULL),
+ cache_(NULL),
configured_(false)
{}
@@ -391,21 +413,25 @@ Resolver::processMessage(const IOMessage& io_message,
OutputBufferPtr buffer,
DNSServer* server)
{
- dlog("Got a DNS message");
InputBuffer request_buffer(io_message.getData(), io_message.getDataSize());
// First, check the header part. If we fail even for the base header,
// just drop the message.
+
+ // In the following code, the debug output is such that there should only be
+ // one debug message if packet processing failed. There could be two if
+ // it succeeded.
try {
query_message->parseHeader(request_buffer);
// Ignore all responses.
if (query_message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- dlog("Received unexpected response, ignoring");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXPECTED_RESPONSE);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- dlog(string("DNS packet exception: ") + ex.what(),true);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HEADER_ERROR)
+ .arg(ex.what());
server->resume(false);
return;
}
@@ -414,68 +440,63 @@ Resolver::processMessage(const IOMessage& io_message,
try {
query_message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- dlog(string("returning ") + error.getRcode().toText() + ": " +
- error.what());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTOCOL_ERROR)
+ .arg(error.what()).arg(error.getRcode());
makeErrorMessage(query_message, answer_message,
buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- dlog(string("returning SERVFAIL: ") + ex.what());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_MESSAGE_ERROR)
+ .arg(ex.what()).arg(Rcode::SERVFAIL());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::SERVFAIL());
server->resume(true);
return;
- } // other exceptions will be handled at a higher layer.
+ } // Other exceptions will be handled at a higher layer.
- dlog("received a message:\n" + query_message->toText());
+ // Note: there appears to be no LOG_DEBUG for a successfully-received
+ // message. This is not an oversight - it is handled below. In the
+ // meantime, output the full message for debug purposes (if requested).
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+ RESOLVER_DNS_MESSAGE_RECEIVED).arg(*query_message);
// Perform further protocol-level validation.
- bool sendAnswer = true;
+ bool send_answer = true;
if (query_message->getOpcode() == Opcode::NOTIFY()) {
+
makeErrorMessage(query_message, answer_message,
buffer, Rcode::NOTAUTH());
- dlog("Notify arrived, but we are not authoritative");
+ // Notify arrived, but we are not authoritative.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_NOTIFY_RECEIVED);
} else if (query_message->getOpcode() != Opcode::QUERY()) {
- dlog("Unsupported opcode (got: " + query_message->getOpcode().toText() +
- ", expected: " + Opcode::QUERY().toText());
+ // Unsupported opcode.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_UNSUPPORTED_OPCODE).arg(query_message->getOpcode());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::NOTIMP());
} else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
- dlog("The query contained " +
- boost::lexical_cast<string>(query_message->getRRCount(
- Message::SECTION_QUESTION) + " questions, exactly one expected"));
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::FORMERR());
+ // Not one question
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_NOT_ONE_QUESTION)
+ .arg(query_message->getRRCount(Message::SECTION_QUESTION));
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::FORMERR());
} else {
- ConstQuestionPtr question = *query_message->beginQuestion();
- const RRType &qtype = question->getType();
- if (qtype == RRType::AXFR()) {
- if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::FORMERR());
- } else {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::NOTIMP());
- }
- } else if (qtype == RRType::IXFR()) {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::NOTIMP());
- } else if (question->getClass() != RRClass::IN()) {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::REFUSED());
- } else {
+ const ResolverImpl::NormalQueryResult result =
+ impl_->processNormalQuery(io_message, query_message,
+ answer_message, buffer, server);
+ if (result == ResolverImpl::RECURSION) {
// The RecursiveQuery object will post the "resume" event to the
// DNSServer when an answer arrives, so we don't have to do it now.
- sendAnswer = false;
- impl_->processNormalQuery(*question, answer_message,
- buffer, server);
+ return;
+ } else if (result == ResolverImpl::DROPPED) {
+ send_answer = false;
}
}
- if (sendAnswer) {
- server->resume(true);
- }
+ server->resume(send_answer);
}
void
@@ -485,19 +506,85 @@ ResolverImpl::resolve(const QuestionPtr& question,
rec_query_->resolve(question, callback);
}
-void
-ResolverImpl::processNormalQuery(const Question& question,
+ResolverImpl::NormalQueryResult
+ResolverImpl::processNormalQuery(const IOMessage& io_message,
+ MessagePtr query_message,
MessagePtr answer_message,
OutputBufferPtr buffer,
DNSServer* server)
{
- dlog("Processing normal query");
- rec_query_->resolve(question, answer_message, buffer, server);
+ const ConstQuestionPtr question = *query_message->beginQuestion();
+ const RRType qtype = question->getType();
+ const RRClass qclass = question->getClass();
+
+ // Apply query ACL
+ const Client client(io_message);
+ const BasicAction query_action(
+ getQueryACL().execute(acl::dns::RequestContext(
+ client.getRequestSourceIPAddress(),
+ query_message->getTSIGRecord())));
+ if (query_action == isc::acl::REJECT) {
+ LOG_INFO(resolver_logger, RESOLVER_QUERY_REJECTED)
+ .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::REFUSED());
+ return (ERROR);
+ } else if (query_action == isc::acl::DROP) {
+ LOG_INFO(resolver_logger, RESOLVER_QUERY_DROPPED)
+ .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
+ return (DROPPED);
+ }
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_QUERY_ACCEPTED)
+ .arg(question->getName()).arg(qtype).arg(question->getClass())
+ .arg(client);
+
+ // ACL passed. Reject inappropriate queries for the resolver.
+ if (qtype == RRType::AXFR()) {
+ if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
+ // Can't process AXFR request received over UDP
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_UDP);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::FORMERR());
+ } else {
+ // ... or over TCP for that matter
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_TCP);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::NOTIMP());
+ }
+ return (ERROR);
+ } else if (qtype == RRType::IXFR()) {
+ // Can't process IXFR request
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_IXFR);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::NOTIMP());
+ return (ERROR);
+ } else if (qclass != RRClass::IN()) {
+ // Non-IN message received, refuse it.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NON_IN_PACKET)
+ .arg(question->getClass());
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::REFUSED());
+ return (ERROR);
+ }
+
+ // Everything is okay. Start resolver.
+ if (upstream_.empty()) {
+ // Processing normal query
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMAL_QUERY);
+ rec_query_->resolve(*question, answer_message, buffer, server);
+ } else {
+ // Processing forward query
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_FORWARD_QUERY);
+ rec_query_->forward(query_message, answer_message, buffer, server);
+ }
+
+ return (RECURSION);
}
ConstElementPtr
Resolver::updateConfig(ConstElementPtr config) {
- dlog("New config comes: " + config->toWire());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIG_UPDATED)
+ .arg(*config);
try {
// Parse forward_addresses
@@ -510,6 +597,10 @@ Resolver::updateConfig(ConstElementPtr config) {
ConstElementPtr listenAddressesE(config->get("listen_on"));
AddressList listenAddresses(parseAddresses(listenAddressesE,
"listen_on"));
+ const ConstElementPtr query_acl_cfg(config->get("query_acl"));
+ const boost::shared_ptr<const RequestACL> query_acl =
+ query_acl_cfg ? acl::dns::getRequestLoader().load(query_acl_cfg) :
+ boost::shared_ptr<RequestACL>();
bool set_timeouts(false);
int qtimeout = impl_->query_timeout_;
int ctimeout = impl_->client_timeout_;
@@ -524,6 +615,8 @@ Resolver::updateConfig(ConstElementPtr config) {
// check for us
qtimeout = qtimeoutE->intValue();
if (qtimeout < -1) {
+ LOG_ERROR(resolver_logger, RESOLVER_QUERY_TIME_SMALL)
+ .arg(qtimeout);
isc_throw(BadValue, "Query timeout too small");
}
set_timeouts = true;
@@ -531,6 +624,8 @@ Resolver::updateConfig(ConstElementPtr config) {
if (ctimeoutE) {
ctimeout = ctimeoutE->intValue();
if (ctimeout < -1) {
+ LOG_ERROR(resolver_logger, RESOLVER_CLIENT_TIME_SMALL)
+ .arg(ctimeout);
isc_throw(BadValue, "Client timeout too small");
}
set_timeouts = true;
@@ -538,12 +633,19 @@ Resolver::updateConfig(ConstElementPtr config) {
if (ltimeoutE) {
ltimeout = ltimeoutE->intValue();
if (ltimeout < -1) {
+ LOG_ERROR(resolver_logger, RESOLVER_LOOKUP_TIME_SMALL)
+ .arg(ltimeout);
isc_throw(BadValue, "Lookup timeout too small");
}
set_timeouts = true;
}
if (retriesE) {
+ // Do the assignment from "retriesE->intValue()" to "retries"
+ // _after_ the comparison (as opposed to before it for the timeouts)
+ // because "retries" is unsigned.
if (retriesE->intValue() < 0) {
+ LOG_ERROR(resolver_logger, RESOLVER_NEGATIVE_RETRIES)
+ .arg(retriesE->intValue());
isc_throw(BadValue, "Negative number of retries");
}
retries = retriesE->intValue();
@@ -556,15 +658,6 @@ Resolver::updateConfig(ConstElementPtr config) {
if (listenAddressesE) {
setListenAddresses(listenAddresses);
need_query_restart = true;
- } else {
- if (!configured_) {
- // TODO: ModuleSpec needs getDefault()
- AddressList initial_addresses;
- initial_addresses.push_back(AddressPair("127.0.0.1", 53));
- initial_addresses.push_back(AddressPair("::1", 53));
- setListenAddresses(initial_addresses);
- need_query_restart = true;
- }
}
if (forwardAddressesE) {
setForwardAddresses(forwardAddresses);
@@ -578,6 +671,9 @@ Resolver::updateConfig(ConstElementPtr config) {
setTimeouts(qtimeout, ctimeout, ltimeout, retries);
need_query_restart = true;
}
+ if (query_acl) {
+ setQueryACL(query_acl);
+ }
if (need_query_restart) {
impl_->queryShutdown();
@@ -585,8 +681,11 @@ Resolver::updateConfig(ConstElementPtr config) {
}
setConfigured();
return (isc::config::createAnswer());
+
} catch (const isc::Exception& error) {
- dlog(string("error in config: ") + error.what(),true);
+
+ // Configuration error
+ LOG_ERROR(resolver_logger, RESOLVER_CONFIG_ERROR).arg(error.what());
return (isc::config::createAnswer(1, error.what()));
}
}
@@ -626,10 +725,10 @@ Resolver::setListenAddresses(const AddressList& addresses) {
void
Resolver::setTimeouts(int query_timeout, int client_timeout,
int lookup_timeout, unsigned retries) {
- dlog("Setting query timeout to " + boost::lexical_cast<string>(query_timeout) +
- ", client timeout to " + boost::lexical_cast<string>(client_timeout) +
- ", lookup timeout to " + boost::lexical_cast<string>(lookup_timeout) +
- " and retry count to " + boost::lexical_cast<string>(retries));
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_SET_PARAMS)
+ .arg(query_timeout).arg(client_timeout).arg(lookup_timeout)
+ .arg(retries);
+
impl_->query_timeout_ = query_timeout;
impl_->client_timeout_ = client_timeout;
impl_->lookup_timeout_ = lookup_timeout;
@@ -660,3 +759,18 @@ AddressList
Resolver::getListenAddresses() const {
return (impl_->listen_);
}
+
+const RequestACL&
+Resolver::getQueryACL() const {
+ return (impl_->getQueryACL());
+}
+
+void
+Resolver::setQueryACL(boost::shared_ptr<const RequestACL> new_acl) {
+ if (!new_acl) {
+ isc_throw(InvalidParameter, "NULL pointer is passed to setQueryACL");
+ }
+
+ LOG_INFO(resolver_logger, RESOLVER_SET_QUERY_ACL);
+ impl_->setQueryACL(new_acl);
+}
diff --git a/src/bin/resolver/resolver.h b/src/bin/resolver/resolver.h
index 2890dd3..4b9c773 100644
--- a/src/bin/resolver/resolver.h
+++ b/src/bin/resolver/resolver.h
@@ -19,8 +19,11 @@
#include <vector>
#include <utility>
+#include <boost/shared_ptr.hpp>
+
#include <cc/data.h>
#include <config/ccsession.h>
+#include <acl/dns.h>
#include <dns/message.h>
#include <util/buffer.h>
@@ -236,6 +239,25 @@ public:
*/
int getRetries() const;
+ /// Get the query ACL.
+ ///
+ /// \exception None
+ const isc::acl::dns::RequestACL& getQueryACL() const;
+
+ /// Set the new query ACL.
+ ///
+ /// This method replaces the existing query ACL completely.
+ /// Normally this method will be called via the configuration handler,
+ /// but is publicly available for convenience of tests (and other
+ /// experimental purposes).
+ /// \c new_acl must not be a NULL pointer.
+ ///
+ /// \exception InvalidParameter The given pointer is NULL
+ ///
+ /// \param new_acl The new ACL to replace the existing one.
+ void setQueryACL(boost::shared_ptr<const isc::acl::dns::RequestACL>
+ new_acl);
+
private:
ResolverImpl* impl_;
isc::asiodns::DNSService* dnss_;
diff --git a/src/bin/resolver/resolver.spec.pre.in b/src/bin/resolver/resolver.spec.pre.in
index 9df1e75..076ef85 100644
--- a/src/bin/resolver/resolver.spec.pre.in
+++ b/src/bin/resolver/resolver.spec.pre.in
@@ -113,6 +113,41 @@
}
]
}
+ },
+ {
+ "item_name": "query_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "action": "ACCEPT",
+ "from": "127.0.0.1"
+ },
+ {
+ "action": "ACCEPT",
+ "from": "::1"
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "rule",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "action",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "from",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/resolver/resolver.spec.pre.win32 b/src/bin/resolver/resolver.spec.pre.win32
new file mode 100644
index 0000000..076ef85
--- /dev/null
+++ b/src/bin/resolver/resolver.spec.pre.win32
@@ -0,0 +1,162 @@
+{
+ "module_spec": {
+ "module_name": "Resolver",
+ "module_description": "Recursive service",
+ "config_data": [
+ {
+ "item_name": "timeout_query",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 2000
+ },
+ {
+ "item_name": "timeout_client",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 4000
+ },
+ {
+ "item_name": "timeout_lookup",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 30000
+ },
+ {
+ "item_name": "retries",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 3
+ },
+ {
+ "item_name": "forward_addresses",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec" : {
+ "item_name": "address",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "address",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "::1"
+ },
+ {
+ "item_name": "port",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 53
+ }
+ ]
+ }
+ },
+ {
+ "item_name": "root_addresses",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec" : {
+ "item_name": "address",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "address",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "::1"
+ },
+ {
+ "item_name": "port",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 53
+ }
+ ]
+ }
+ },
+ {
+ "item_name": "listen_on",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "address": "::1",
+ "port": 53
+ },
+ {
+ "address": "127.0.0.1",
+ "port": 53
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "address",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "address",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "::1"
+ },
+ {
+ "item_name": "port",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 53
+ }
+ ]
+ }
+ },
+ {
+ "item_name": "query_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "action": "ACCEPT",
+ "from": "127.0.0.1"
+ },
+ {
+ "action": "ACCEPT",
+ "from": "::1"
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "rule",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "action",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "from",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ }
+ ]
+ }
+ }
+ ],
+ "commands": [
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down recursive DNS server",
+ "command_args": []
+ }
+ ]
+ }
+}
+
diff --git a/src/bin/resolver/resolver_log.cc b/src/bin/resolver/resolver_log.cc
new file mode 100644
index 0000000..4af0159
--- /dev/null
+++ b/src/bin/resolver/resolver_log.cc
@@ -0,0 +1,19 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the resolver
+
+#include "resolver_log.h"
+
+isc::log::Logger resolver_logger("resolver");
diff --git a/src/bin/resolver/resolver_log.h b/src/bin/resolver/resolver_log.h
new file mode 100644
index 0000000..8378b98
--- /dev/null
+++ b/src/bin/resolver/resolver_log.h
@@ -0,0 +1,49 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __RESOLVER_LOG__H
+#define __RESOLVER_LOG__H
+
+#include <log/macros.h>
+#include "resolver_messages.h"
+
+/// \brief Resolver Logging
+///
+/// Defines the levels used to output debug messages in the resolver. Note that
+/// higher numbers equate to more verbose (and detailed) output.
+
+// Initialization
+const int RESOLVER_DBG_INIT = 10;
+
+// Configuration messages
+const int RESOLVER_DBG_CONFIG = 30;
+
+// Trace sending and receiving of messages
+const int RESOLVER_DBG_IO = 50;
+
+// Trace processing of messages
+const int RESOLVER_DBG_PROCESS = 70;
+
+// Detailed message information
+const int RESOLVER_DBG_DETAIL = 90;
+
+
+/// \brief Resolver Logger
+///
+/// Define the logger used to log messages. We could define it in multiple
+/// modules, but defining in a single module and linking to it saves time and
+/// space.
+extern isc::log::Logger resolver_logger;
+
+#endif // __RESOLVER_LOG__H
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
new file mode 100644
index 0000000..7930c52
--- /dev/null
+++ b/src/bin/resolver/resolver_messages.mes
@@ -0,0 +1,248 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# along with the resolver methods.
+
+% RESOLVER_AXFR_TCP AXFR request received over TCP
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_AXFR_UDP AXFR request received over UDP
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
+
+% RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+
+% RESOLVER_CONFIG_CHANNEL configuration channel created
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+
+% RESOLVER_CONFIG_ERROR error in configuration: %1
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
+
+% RESOLVER_CONFIG_LOADED configuration loaded
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+
+% RESOLVER_CONFIG_UPDATED configuration updated: %1
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
+
+% RESOLVER_CREATED main resolver object created
+This is a debug message indicating that the main resolver object has
+been created.
+
+% RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+
+% RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
+
+% RESOLVER_FAILED resolver failed, reason: %1
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+
+% RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+
+% RESOLVER_FORWARD_QUERY processing forward query
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
+servers.
+
+% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
+
+% RESOLVER_IXFR IXFR request received
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+
+% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+
+% RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+
+% RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message
+This debug message is issued when resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
+
+% RESOLVER_NORMAL_QUERY processing normal query
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+
+% RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+
+% RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+
+% RESOLVER_NO_ROOT_ADDRESS no root addresses available
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+
+% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+
+% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+
+% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+
+% RESOLVER_QUERY_SETUP query setup
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+
+% RESOLVER_QUERY_SHUTDOWN query shutdown
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+
+% RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+
+% RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
+
+% RESOLVER_RECURSIVE running in recursive mode
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+
+% RESOLVER_SERVICE_CREATED service object created
+This debug message is output when the resolver creates the main service
+object (which handles the received queries).
+
+% RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
+This debug message lists the parameters being set for the resolver. These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers. Client timeout: the interval to resolve a query by
+a client: after this time, the resolver sends back a SERVFAIL to the client
+whilst continuing to resolve the query. Lookup timeout: the time at which the
+resolver gives up trying to resolve a query. Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries timeout, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+timeout and drop the query.
+
+% RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+
+% RESOLVER_SHUTDOWN resolver shutdown complete
+This informational message is output when the resolver has shut down.
+
+% RESOLVER_STARTED resolver started
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+
+% RESOLVER_STARTING starting resolver with command line '%1'
+An informational message, this is output when the resolver starts up.
+
+% RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+
+% RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_SET_QUERY_ACL query ACL is configured
+This debug message is generated when a new query ACL is configured for
+the resolver.
+
+% RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+
+% RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+
+% RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
diff --git a/src/bin/resolver/spec_config.h.pre.win32 b/src/bin/resolver/spec_config.h.pre.win32
new file mode 100644
index 0000000..9f20c47
--- /dev/null
+++ b/src/bin/resolver/spec_config.h.pre.win32
@@ -0,0 +1,15 @@
+// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define RESOLVER_SPECFILE_LOCATION "c:/cygwin/home/fdupont/bind10.trac826/src/bin/resolver/resolver.spec"
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 444358b..97a2ba6 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -1,6 +1,7 @@
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/bin
AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += -I$(top_builddir)/src/bin/resolver
AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(top_srcdir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += $(BOOST_INCLUDES)
@@ -16,24 +17,29 @@ CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
TESTS += run_unittests
+
run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
run_unittests_SOURCES += ../resolver.h ../resolver.cc
+run_unittests_SOURCES += ../resolver_log.h ../resolver_log.cc
run_unittests_SOURCES += ../response_scrubber.h ../response_scrubber.cc
run_unittests_SOURCES += resolver_unittest.cc
run_unittests_SOURCES += resolver_config_unittest.cc
run_unittests_SOURCES += response_scrubber_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../resolver_messages.h ../resolver_messages.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(SQLITE_LIBS)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
-run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
@@ -42,6 +48,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
# Note the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS
diff --git a/src/bin/resolver/tests/resolver_config_unittest.cc b/src/bin/resolver/tests/resolver_config_unittest.cc
index 70e856d..b3a0a7d 100644
--- a/src/bin/resolver/tests/resolver_config_unittest.cc
+++ b/src/bin/resolver/tests/resolver_config_unittest.cc
@@ -14,14 +14,29 @@
#include <config.h>
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#endif
+
#include <string>
+#include <boost/scoped_ptr.hpp>
+
#include <gtest/gtest.h>
#include <cc/data.h>
+#include <config/ccsession.h>
+
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_socket.h>
+#include <asiolink/io_message.h>
+
+#include <acl/acl.h>
+
+#include <server_common/client.h>
#include <resolver/resolver.h>
@@ -30,25 +45,42 @@
#include <testutils/portconfig.h>
using namespace std;
+using boost::scoped_ptr;
+using namespace isc::acl;
+using isc::acl::dns::RequestContext;
using namespace isc::data;
using namespace isc::testutils;
using namespace isc::asiodns;
using namespace isc::asiolink;
+using namespace isc::server_common;
using isc::UnitTestUtil;
namespace {
class ResolverConfig : public ::testing::Test {
- public:
- IOService ios;
- DNSService dnss;
- Resolver server;
- ResolverConfig() :
- dnss(ios, NULL, NULL, NULL)
- {
- server.setDNSService(dnss);
- server.setConfigured();
- }
- void invalidTest(const string &JSON, const string& name);
+protected:
+ IOService ios;
+ DNSService dnss;
+ Resolver server;
+ scoped_ptr<const IOEndpoint> endpoint;
+ scoped_ptr<const IOMessage> query_message;
+ scoped_ptr<const Client> client;
+ scoped_ptr<const RequestContext> request;
+ ResolverConfig() : dnss(ios, NULL, NULL, NULL) {
+ server.setDNSService(dnss);
+ server.setConfigured();
+ }
+ const RequestContext& createRequest(const string& source_addr) {
+ endpoint.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress(source_addr),
+ 53210));
+ query_message.reset(new IOMessage(NULL, 0,
+ IOSocket::getDummyUDPSocket(),
+ *endpoint));
+ client.reset(new Client(*query_message));
+ request.reset(new RequestContext(client->getRequestSourceIPAddress(),
+ NULL));
+ return (*request);
+ }
+ void invalidTest(const string &JSON, const string& name);
};
TEST_F(ResolverConfig, forwardAddresses) {
@@ -77,14 +109,14 @@ TEST_F(ResolverConfig, forwardAddresses) {
TEST_F(ResolverConfig, forwardAddressConfig) {
// Try putting there some address
- ElementPtr config(Element::fromJSON("{"
- "\"forward_addresses\": ["
- " {"
- " \"address\": \"192.0.2.1\","
- " \"port\": 53"
- " }"
- "]"
- "}"));
+ ConstElementPtr config(Element::fromJSON("{"
+ "\"forward_addresses\": ["
+ " {"
+ " \"address\": \"192.0.2.1\","
+ " \"port\": 53"
+ " }"
+ "]"
+ "}"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
EXPECT_TRUE(server.isForwarding());
@@ -104,14 +136,14 @@ TEST_F(ResolverConfig, forwardAddressConfig) {
TEST_F(ResolverConfig, rootAddressConfig) {
// Try putting there some address
- ElementPtr config(Element::fromJSON("{"
- "\"root_addresses\": ["
- " {"
- " \"address\": \"192.0.2.1\","
- " \"port\": 53"
- " }"
- "]"
- "}"));
+ ConstElementPtr config(Element::fromJSON("{"
+ "\"root_addresses\": ["
+ " {"
+ " \"address\": \"192.0.2.1\","
+ " \"port\": 53"
+ " }"
+ "]"
+ "}"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
ASSERT_EQ(1, server.getRootAddresses().size());
@@ -187,12 +219,12 @@ TEST_F(ResolverConfig, timeouts) {
}
TEST_F(ResolverConfig, timeoutsConfig) {
- ElementPtr config = Element::fromJSON("{"
- "\"timeout_query\": 1000,"
- "\"timeout_client\": 2000,"
- "\"timeout_lookup\": 3000,"
- "\"retries\": 4"
- "}");
+ ConstElementPtr config = Element::fromJSON("{"
+ "\"timeout_query\": 1000,"
+ "\"timeout_client\": 2000,"
+ "\"timeout_lookup\": 3000,"
+ "\"retries\": 4"
+ "}");
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
EXPECT_EQ(1000, server.getQueryTimeout());
@@ -228,4 +260,140 @@ TEST_F(ResolverConfig, invalidTimeoutsConfig) {
"}", "Negative number of retries");
}
+TEST_F(ResolverConfig, defaultQueryACL) {
+ // If no configuration is loaded, the default ACL should reject everything.
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+
+ // The following would be allowed if the server had loaded the default
+ // configuration from the spec file. In this context it should not have
+ // happened, and they should be rejected just like the above cases.
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("127.0.0.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("::1")));
+}
+
+TEST_F(ResolverConfig, emptyQueryACL) {
+ // Explicitly configured empty ACL should have the same effect.
+ ConstElementPtr config(Element::fromJSON("{ \"query_acl\": [] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, queryACLIPv4) {
+ // A simple "accept" query for a specific IPv4 address
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"} ] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, queryACLIPv6) {
+ // same for IPv6
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"2001:db8::1\"} ] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, multiEntryACL) {
+ // A bit more complicated one: mixture of IPv4 and IPv6 with 3 rules
+ // in total. We shouldn't have to check so many variations of rules
+ // as it should have been tested in the underlying ACL module. All we
+ // have to do to check is a reasonably complicated ACL configuration is
+ // loaded as expected.
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"REJECT\","
+ " \"from\": \"192.0.2.0/24\"},"
+ " {\"action\": \"DROP\","
+ " \"from\": \"2001:db8::1\"},"
+ "] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.2")));
+ EXPECT_EQ(DROP, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::2"))); // match the default rule
+}
+
+
+int
+getResultCode(ConstElementPtr result) {
+ int rcode;
+ isc::config::parseAnswer(rcode, result);
+ return (rcode);
+}
+
+TEST_F(ResolverConfig, queryACLActionOnly) {
+ // "action only" rule will be accepted by the loader, which can
+ // effectively change the default action.
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"DROP\"} ] }"));
+ EXPECT_EQ(0, getResultCode(server.updateConfig(config)));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+
+ // We reject non matching queries by default, but the last resort
+ // rule should have changed the action in that case to "DROP".
+ EXPECT_EQ(DROP, server.getQueryACL().execute(createRequest("192.0.2.2")));
+}
+
+TEST_F(ResolverConfig, badQueryACL) {
+ // Most of these cases shouldn't happen in practice because the syntax
+ // check should be performed before updateConfig(). But we check at
+ // least the server code won't crash even if an unexpected input is given.
+
+ // ACL must be a list
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\": 1 }"))));
+ // Each rule must have "action" and "from"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"from\": \"192.0.2.1\"} ] }"))));
+ // invalid "action"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": 1,"
+ " \"from\": \"192.0.2.1\"}]}"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"BADACTION\","
+ " \"from\": \"192.0.2.1\"}]}"))));
+ // invalid "from"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": 53}]}"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"1922.0.2.1\"}]}"))));
+}
+
}
diff --git a/src/bin/resolver/tests/resolver_unittest.cc b/src/bin/resolver/tests/resolver_unittest.cc
index 97edf12..80c209a 100644
--- a/src/bin/resolver/tests/resolver_unittest.cc
+++ b/src/bin/resolver/tests/resolver_unittest.cc
@@ -12,14 +12,28 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <config.h>
+
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#endif
+
+#include <string>
+
+#include <exceptions/exceptions.h>
+
#include <dns/name.h>
+#include <cc/data.h>
#include <resolver/resolver.h>
#include <dns/tests/unittest_util.h>
#include <testutils/dnsmessage_test.h>
#include <testutils/srv_test.h>
+using namespace std;
using namespace isc::dns;
+using namespace isc::data;
+using isc::acl::dns::RequestACL;
using namespace isc::testutils;
using isc::UnitTestUtil;
@@ -28,7 +42,17 @@ const char* const TEST_PORT = "53535";
class ResolverTest : public SrvTestBase{
protected:
- ResolverTest() : server(){}
+ ResolverTest() : server() {
+ // By default queries from the "default remote address" will be
+ // rejected, so we'll need to add an explicit ACL entry to allow that.
+ server.setConfigured();
+ server.updateConfig(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"" +
+ string(DEFAULT_REMOTE_ADDRESS) +
+ "\"} ] }"));
+ }
virtual void processMessage() {
server.processMessage(*io_message,
parse_message,
@@ -136,4 +160,45 @@ TEST_F(ResolverTest, notifyFail) {
Opcode::NOTIFY().getCode(), QR_FLAG, 0, 0, 0, 0);
}
+TEST_F(ResolverTest, setQueryACL) {
+ // valid cases are tested through other tests. We only explicitly check
+ // an invalid case: passing a NULL shared pointer.
+ EXPECT_THROW(server.setQueryACL(boost::shared_ptr<const RequestACL>()),
+ isc::InvalidParameter);
+}
+
+TEST_F(ResolverTest, queryACL) {
+ // The "ACCEPT" cases are covered in other tests. Here we explicitly
+ // test "REJECT" and "DROP" cases.
+
+ // Clear the existing ACL, reverting to the "default reject" rule.
+
+ // AXFR over UDP. This would otherwise result in FORMERR.
+ server.updateConfig(Element::fromJSON("{ \"query_acl\": [] }"));
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
+ createRequestPacket(request_message, IPPROTO_UDP);
+ server.processMessage(*io_message, parse_message, response_message,
+ response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::REFUSED(),
+ Opcode::QUERY().getCode(), QR_FLAG, 1, 0, 0, 0);
+
+ // Same query, but with an explicit "DROP" ACL entry. There should be
+ // no response.
+ server.updateConfig(Element::fromJSON("{ \"query_acl\": "
+ " [ {\"action\": \"DROP\","
+ " \"from\": \"" +
+ string(DEFAULT_REMOTE_ADDRESS) +
+ "\"} ] }"));
+ parse_message->clear(Message::PARSE);
+ response_message->clear(Message::RENDER);
+ response_obuffer->clear();
+ server.processMessage(*io_message, parse_message, response_message,
+ response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
+}
+
+
}
diff --git a/src/bin/resolver/tests/response_scrubber_unittest.cc b/src/bin/resolver/tests/response_scrubber_unittest.cc
index eff5598..cfe1c9d 100644
--- a/src/bin/resolver/tests/response_scrubber_unittest.cc
+++ b/src/bin/resolver/tests/response_scrubber_unittest.cc
@@ -14,16 +14,19 @@
// $Id$
+#include <config.h>
+
+#include <stdint.h>
#include <string>
#include <iostream>
#include <gtest/gtest.h>
-#include <config.h>
-
#include <asiolink/io_endpoint.h>
#include <asiolink/io_address.h>
+#ifndef _WIN32
#include <netinet/in.h>
+#endif
#include <dns/name.h>
#include <dns/opcode.h>
@@ -68,6 +71,12 @@ public:
return address_.getFamily();
}
+ // This is completely dummy and unused. Define it just for build.
+ virtual const struct sockaddr& getSockAddr() const {
+ static struct sockaddr sa;
+ return (sa);
+ }
+
private:
IOAddress address_; // Address of endpoint
uint16_t port_; // Port number of endpoint
diff --git a/src/bin/resolver/tests/run_unittests.cc b/src/bin/resolver/tests/run_unittests.cc
index 6ae848d..d3bbab7 100644
--- a/src/bin/resolver/tests/run_unittests.cc
+++ b/src/bin/resolver/tests/run_unittests.cc
@@ -13,6 +13,8 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
#include <dns/tests/unittest_util.h>
@@ -21,6 +23,7 @@ main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR);
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
+ isc::log::initLogger();
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/bin/sockcreator/README b/src/bin/sockcreator/README
index 4dbbee7..e142d19 100644
--- a/src/bin/sockcreator/README
+++ b/src/bin/sockcreator/README
@@ -3,7 +3,7 @@ The socket creator
The only thing we need higher rights than standard user is binding sockets to
ports lower than 1024. So we will have a separate process that keeps the
-rights, while the rests drop them for security reasons.
+rights, while the rest drops them for security reasons.
This process is the socket creator. Its goal is to be as simple as possible
and to contain as little code as possible to minimise the amount of code
diff --git a/src/bin/sockcreator/tests/Makefile.am b/src/bin/sockcreator/tests/Makefile.am
index 2e1307a..223e761 100644
--- a/src/bin/sockcreator/tests/Makefile.am
+++ b/src/bin/sockcreator/tests/Makefile.am
@@ -16,10 +16,9 @@ run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la
-run_unittests_LDADD += \
- $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/sockcreator/tests/run_unittests.cc b/src/bin/sockcreator/tests/run_unittests.cc
index e787ab1..1287164 100644
--- a/src/bin/sockcreator/tests/run_unittests.cc
+++ b/src/bin/sockcreator/tests/run_unittests.cc
@@ -13,10 +13,11 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char *argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
+ return isc::util::unittests::run_all();
}
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index 485bc05..3289765 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,16 +5,25 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec
+b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
CLEANFILES = b10-stats stats.pyc
CLEANFILES += b10-stats-httpd stats_httpd.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
man_MANS = b10-stats.8 b10-stats-httpd.8
EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec
+EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
+EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
if ENABLE_MAN
@@ -26,12 +35,24 @@ b10-stats-httpd.8: b10-stats-httpd.xml
endif
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py : stats_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py : stats_httpd_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_httpd_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-stats: stats.py
- $(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
- -e "s|.*#@@REMOVED@@$$||" stats.py >$@
+b10-stats: stats.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+ $(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats.py >$@
chmod a+x $@
-b10-stats-httpd: stats_httpd.py
+b10-stats-httpd: stats_httpd.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats_httpd.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index c066f91..ed4aafa 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -66,6 +66,10 @@ bindctl(1)\&. Please see the manual of
bindctl(1)
about how to configure the settings\&.
.PP
+/usr/local/share/bind10\-devel/stats\-schema\&.spec
+\(em This is a spec file for data schema of BIND 10 statistics\&. This schema cannot be configured via
+bindctl(1)\&.
+.PP
/usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
\(em the template file of XML document\&.
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 5cf3b4b..34c704f 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -112,6 +112,12 @@
of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
how to configure the settings.
</para>
+ <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
+ <!--TODO: The filename should be computed from prefix-->
+ — This is a spec file for data schema
+ of BIND 10 statistics. This schema cannot be configured
+ via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
+ </para>
<para>
<filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
<!--TODO: The filename should be computed from prefix-->
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index 5714234..98b109b 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -1,22 +1,13 @@
'\" t
.\" Title: b10-stats
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: Oct 15, 2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-STATS" "8" "Oct 15, 2010" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "B10\-STATS" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -45,9 +36,9 @@ with other modules like
\fBb10\-auth\fR
and so on\&. It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. Stats module collects data and aggregates it\&.
\fBb10\-stats\fR
-invokes "sendstats" command for
+invokes an internal command for
\fBbind10\fR
-after its initial starting because it\*(Aqs sure to collect statistics data from
+after its initial starting because it\'s sure to collect statistics data from
\fBbind10\fR\&.
.SH "OPTIONS"
.PP
@@ -59,15 +50,99 @@ This
\fBb10\-stats\fR
switches to verbose mode\&. It sends verbose messages to STDOUT\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The
+\fBb10\-stats\fR
+command does not have any configurable settings\&.
+.PP
+The configuration commands are:
+.PP
+
+
+\fBremove\fR
+removes the named statistics name and data\&.
+.PP
+
+
+\fBreset\fR
+will reset all statistics data to default values except for constant names\&. This may re\-add previously removed statistics names\&.
+.PP
+
+\fBset\fR
+.PP
+
+\fBshow\fR
+will send the statistics data in JSON format\&. By default, it outputs all the statistics data it has collected\&. An optional item name may be specified to receive individual output\&.
+.PP
+
+\fBshutdown\fR
+will shutdown the
+\fBb10\-stats\fR
+process\&. (Note that the
+\fBbind10\fR
+parent may restart it\&.)
+.PP
+
+\fBstatus\fR
+simply indicates that the daemon is running\&.
+.SH "STATISTICS DATA"
+.PP
+The
+\fBb10\-stats\fR
+daemon contains these statistics:
+.PP
+report_time
+.RS 4
+The latest report date and time in ISO 8601 format\&.
+.RE
+.PP
+stats\&.boot_time
+.RS 4
+The date and time when this daemon was started in ISO 8601 format\&. This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.
+.RE
+.PP
+stats\&.last_update_time
+.RS 4
+The date and time (in ISO 8601 format) when this daemon last received data from another component\&.
+.RE
+.PP
+stats\&.lname
+.RS 4
+This is the name used for the
+\fBb10\-msgq\fR
+command\-control channel\&. (This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.)
+.RE
+.PP
+stats\&.start_time
+.RS 4
+This is the date and time (in ISO 8601 format) when this daemon started collecting data\&.
+.RE
+.PP
+stats\&.timestamp
+.RS 4
+The current date and time represented in seconds since UNIX epoch (1970\-01\-01T0 0:00:00Z) with precision (delimited with a period) up to one hundred thousandth of second\&.
+.RE
+.PP
+See other manual pages for explanations for their statistics that are kept track by
+\fBb10\-stats\fR\&.
.SH "FILES"
.PP
/usr/local/share/bind10\-devel/stats\&.spec
\(em This is a spec file for
-\fBb10\-stats\fR\&. It contains definitions of statistics items of BIND 10 and commands received via
+\fBb10\-stats\fR\&. It contains commands for
+\fBb10\-stats\fR\&. They can be invoked via
+bindctl(1)\&.
+.PP
+/usr/local/share/bind10\-devel/stats\-schema\&.spec
+\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
bindctl(1)\&.
.SH "SEE ALSO"
.PP
+\fBb10-stats-httpd\fR(8),
\fBbind10\fR(8),
\fBbindctl\fR(1),
\fBb10-auth\fR(8),
@@ -76,7 +151,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-stats\fR
-daemon was initially designed and implemented by Naoki Kambe of JPRS in Oct 2010\&.
+daemon was initially designed and implemented by Naoki Kambe of JPRS in October 2010\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index 7ec58dd..9709175 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>Oct 15, 2010</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -64,9 +64,10 @@
send stats data to stats module independently from
implementation of stats module, so the frequency of sending data
may not be constant. Stats module collects data and aggregates
- it. <command>b10-stats</command> invokes "sendstats" command
+ it. <command>b10-stats</command> invokes an internal command
for <command>bind10</command> after its initial starting because it's
sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
</para>
</refsect1>
@@ -87,12 +88,136 @@
</refsect1>
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The <command>b10-stats</command> command does not have any
+ configurable settings.
+ </para>
+
+<!-- TODO: formating -->
+ <para>
+ The configuration commands are:
+ </para>
+
+ <para>
+<!-- TODO: remove is removed in trac930 -->
+ <command>remove</command> removes the named statistics name and data.
+ </para>
+
+ <para>
+<!-- TODO: reset is removed in trac930 -->
+ <command>reset</command> will reset all statistics data to
+ default values except for constant names.
+ This may re-add previously removed statistics names.
+ </para>
+
+ <para>
+ <command>set</command>
+<!-- TODO: document this -->
+ </para>
+
+ <para>
+ <command>show</command> will send the statistics data
+ in JSON format.
+ By default, it outputs all the statistics data it has collected.
+ An optional item name may be specified to receive individual output.
+ </para>
+
+<!-- TODO: document showschema -->
+
+ <para>
+ <command>shutdown</command> will shutdown the
+ <command>b10-stats</command> process.
+ (Note that the <command>bind10</command> parent may restart it.)
+ </para>
+
+ <para>
+ <command>status</command> simply indicates that the daemon is
+ running.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The <command>b10-stats</command> daemon contains these statistics:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+ <listitem><simpara>The latest report date and time in
+ ISO 8601 format.</simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.boot_time</term>
+ <listitem><simpara>The date and time when this daemon was
+ started in ISO 8601 format.
+ This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.last_update_time</term>
+ <listitem><simpara>The date and time (in ISO 8601 format)
+ when this daemon last received data from another component.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.lname</term>
+ <listitem><simpara>This is the name used for the
+ <command>b10-msgq</command> command-control channel.
+ (This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.)
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.start_time</term>
+ <listitem><simpara>This is the date and time (in ISO 8601 format)
+ when this daemon started collecting data.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.timestamp</term>
+ <listitem><simpara>The current date and time represented in
+ seconds since UNIX epoch (1970-01-01T0 0:00:00Z) with
+ precision (delimited with a period) up to
+ one hundred thousandth of second.</simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+ See other manual pages for explanations for their statistics
+ that are kept track by <command>b10-stats</command>.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
+ <!--TODO: The filename should be computed from prefix-->
— This is a spec file for <command>b10-stats</command>. It
- contains definitions of statistics items of BIND 10 and commands
- received via
- <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
+ contains commands for <command>b10-stats</command>. They can be
+ invoked
+ via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
+ </para>
+ <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
+ <!--TODO: The filename should be computed from prefix-->
+ — This is a spec file for data schema of
+ of BIND 10 statistics. This schema cannot be configured
+ via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
</para>
</refsect1>
@@ -100,6 +225,9 @@
<title>SEE ALSO</title>
<para>
<citerefentry>
+ <refentrytitle>b10-stats-httpd</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
<refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum>
</citerefentry>,
<citerefentry>
@@ -116,7 +244,7 @@
<title>HISTORY</title>
<para>
The <command>b10-stats</command> daemon was initially designed
- and implemented by Naoki Kambe of JPRS in Oct 2010.
+ and implemented by Naoki Kambe of JPRS in October 2010.
</para>
</refsect1>
</refentry><!--
diff --git a/src/bin/stats/run_b10-stats-httpd.sh.in b/src/bin/stats/run_b10-stats-httpd.sh.in
deleted file mode 100755
index 67c93f0..0000000
--- a/src/bin/stats/run_b10-stats-httpd.sh.in
+++ /dev/null
@@ -1,33 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
-export PYTHON_EXEC
-
-PYTHONPATH=@abs_top_builddir@/src/lib/python
-export PYTHONPATH
-
-BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
-export BIND10_MSGQ_SOCKET_FILE
-
-STATS_PATH=@abs_top_builddir@/src/bin/stats
-
-B10_FROM_SOURCE=@abs_top_srcdir@
-export B10_FROM_SOURCE
-
-cd ${STATS_PATH}
-exec ${PYTHON_EXEC} -O b10-stats-httpd "$@"
diff --git a/src/bin/stats/run_b10-stats.sh.in b/src/bin/stats/run_b10-stats.sh.in
deleted file mode 100755
index b9007c8..0000000
--- a/src/bin/stats/run_b10-stats.sh.in
+++ /dev/null
@@ -1,33 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2010, 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
-export PYTHON_EXEC
-
-PYTHONPATH=@abs_top_builddir@/src/lib/python
-export PYTHONPATH
-
-BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
-export BIND10_MSGQ_SOCKET_FILE
-
-B10_FROM_SOURCE=@abs_top_srcdir@
-export B10_FROM_SOURCE
-
-STATS_PATH=@abs_top_builddir@/src/bin/stats
-
-cd ${STATS_PATH}
-exec ${PYTHON_EXEC} -O b10-stats "$@"
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
new file mode 100644
index 0000000..d5846ad
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<stats:stats_data version="1.0"
+ xmlns:stats="$xsd_namespace"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="$xsd_namespace $xsd_url_path">
+ $xml_string
+</stats:stats_data>
diff --git a/src/bin/stats/stats-httpd-xml.tpl.in b/src/bin/stats/stats-httpd-xml.tpl.in
deleted file mode 100644
index d5846ad..0000000
--- a/src/bin/stats/stats-httpd-xml.tpl.in
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
- xmlns:stats="$xsd_namespace"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="$xsd_namespace $xsd_url_path">
- $xml_string
-</stats:stats_data>
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
new file mode 100644
index 0000000..6ad1280
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<schema targetNamespace="$xsd_namespace"
+ xmlns="http://www.w3.org/2001/XMLSchema"
+ xmlns:stats="$xsd_namespace">
+ <annotation>
+ <documentation xml:lang="en">XML schema of the statistics
+ data in BIND 10</documentation>
+ </annotation>
+ <element name="stats_data">
+ <annotation>
+ <documentation>A set of statistics data</documentation>
+ </annotation>
+ <complexType>
+ $xsd_string
+ <attribute name="version" type="token" use="optional" default="1.0">
+ <annotation>
+ <documentation>Version number of syntax</documentation>
+ </annotation>
+ </attribute>
+ </complexType>
+ </element>
+</schema>
diff --git a/src/bin/stats/stats-httpd-xsd.tpl.in b/src/bin/stats/stats-httpd-xsd.tpl.in
deleted file mode 100644
index 6ad1280..0000000
--- a/src/bin/stats/stats-httpd-xsd.tpl.in
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
- xmlns="http://www.w3.org/2001/XMLSchema"
- xmlns:stats="$xsd_namespace">
- <annotation>
- <documentation xml:lang="en">XML schema of the statistics
- data in BIND 10</documentation>
- </annotation>
- <element name="stats_data">
- <annotation>
- <documentation>A set of statistics data</documentation>
- </annotation>
- <complexType>
- $xsd_string
- <attribute name="version" type="token" use="optional" default="1.0">
- <annotation>
- <documentation>Version number of syntax</documentation>
- </annotation>
- </attribute>
- </complexType>
- </element>
-</schema>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
new file mode 100644
index 0000000..01ffdc6
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<xsl:stylesheet version="1.0"
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
+ xmlns:stats="$xsd_namespace">
+ <xsl:output method="html" encoding="UTF-8"
+ doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
+ doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
+ <xsl:template match="/">
+ <html lang="en">
+ <head>
+ <title>BIND 10 Statistics</title>
+ <style type="text/css"><![CDATA[
+table {
+ border: 1px #000000 solid;
+ border-collapse: collapse;
+}
+td, th {
+ padding: 3px 20px;
+ border: 1px #000000 solid;
+}
+td.title {
+ text-decoration:underline;
+}
+]]>
+ </style>
+ </head>
+ <body>
+ <h1>BIND 10 Statistics</h1>
+ <table>
+ <tr>
+ <th>Title</th>
+ <th>Value</th>
+ </tr>
+ <xsl:apply-templates />
+ </table>
+ </body>
+ </html>
+ </xsl:template>
+ $xsl_string
+</xsl:stylesheet>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl.in b/src/bin/stats/stats-httpd-xsl.tpl.in
deleted file mode 100644
index 01ffdc6..0000000
--- a/src/bin/stats/stats-httpd-xsl.tpl.in
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<xsl:stylesheet version="1.0"
- xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
- xmlns:stats="$xsd_namespace">
- <xsl:output method="html" encoding="UTF-8"
- doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
- doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
- <xsl:template match="/">
- <html lang="en">
- <head>
- <title>BIND 10 Statistics</title>
- <style type="text/css"><![CDATA[
-table {
- border: 1px #000000 solid;
- border-collapse: collapse;
-}
-td, th {
- padding: 3px 20px;
- border: 1px #000000 solid;
-}
-td.title {
- text-decoration:underline;
-}
-]]>
- </style>
- </head>
- <body>
- <h1>BIND 10 Statistics</h1>
- <table>
- <tr>
- <th>Title</th>
- <th>Value</th>
- </tr>
- <xsl:apply-templates />
- </table>
- </body>
- </html>
- </xsl:template>
- $xsl_string
-</xsl:stylesheet>
diff --git a/src/bin/stats/stats-httpd.spec b/src/bin/stats/stats-httpd.spec
new file mode 100644
index 0000000..6307135
--- /dev/null
+++ b/src/bin/stats/stats-httpd.spec
@@ -0,0 +1,54 @@
+{
+ "module_spec": {
+ "module_name": "StatsHttpd",
+ "module_description": "Stats HTTP daemon",
+ "config_data": [
+ {
+ "item_name": "listen_on",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "address": "127.0.0.1",
+ "port": 8000
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "address",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "address",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "127.0.0.1",
+ "item_description": "listen-on address for HTTP"
+ },
+ {
+ "item_name": "port",
+ "item_type": "integer",
+ "item_optional": true,
+ "item_default": 8000,
+ "item_description": "listen-on port for HTTP"
+ }
+ ]
+ },
+ "item_description": "http listen-on address and port"
+ }
+ ],
+ "commands": [
+ {
+ "command_name": "status",
+ "command_description": "Status of the stats httpd",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats httpd",
+ "command_args": []
+ }
+ ]
+ }
+}
diff --git a/src/bin/stats/stats-httpd.spec.in b/src/bin/stats/stats-httpd.spec.in
deleted file mode 100644
index 6307135..0000000
--- a/src/bin/stats/stats-httpd.spec.in
+++ /dev/null
@@ -1,54 +0,0 @@
-{
- "module_spec": {
- "module_name": "StatsHttpd",
- "module_description": "Stats HTTP daemon",
- "config_data": [
- {
- "item_name": "listen_on",
- "item_type": "list",
- "item_optional": false,
- "item_default": [
- {
- "address": "127.0.0.1",
- "port": 8000
- }
- ],
- "list_item_spec": {
- "item_name": "address",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": [
- {
- "item_name": "address",
- "item_type": "string",
- "item_optional": true,
- "item_default": "127.0.0.1",
- "item_description": "listen-on address for HTTP"
- },
- {
- "item_name": "port",
- "item_type": "integer",
- "item_optional": true,
- "item_default": 8000,
- "item_description": "listen-on port for HTTP"
- }
- ]
- },
- "item_description": "http listen-on address and port"
- }
- ],
- "commands": [
- {
- "command_name": "status",
- "command_description": "Status of the stats httpd",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats httpd",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
new file mode 100644
index 0000000..5252865
--- /dev/null
+++ b/src/bin/stats/stats-schema.spec
@@ -0,0 +1,86 @@
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Statistics data schema",
+ "config_data": [
+ {
+ "item_name": "report_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Report time",
+ "item_description": "A date time when stats module reports",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "bind10.boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "bind10.BootTime",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "stats.boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "stats.BootTime",
+ "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "stats.start_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "stats.StartTime",
+ "item_description": "A date time when the stats module starts collecting data or resetting values last time",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "stats.last_update_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "stats.LastUpdateTime",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "stats.timestamp",
+ "item_type": "real",
+ "item_optional": false,
+ "item_default": 0.0,
+ "item_title": "stats.Timestamp",
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+ },
+ {
+ "item_name": "stats.lname",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "stats.LocalName",
+ "item_description": "A localname of stats module given via CC protocol"
+ },
+ {
+ "item_name": "auth.queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "auth.queries.tcp",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "auth.queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "auth.queries.udp",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
+ ],
+ "commands": []
+ }
+}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
old mode 100644
new mode 100755
index dd617f8..afed544
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -24,12 +24,16 @@ from optparse import OptionParser, OptionValueError
from collections import defaultdict
from isc.config.ccsession import ModuleCCSession, create_answer
from isc.cc import Session, SessionError
-# Note: Following lines are removed in b10-stats #@@REMOVED@@
-if __name__ == 'stats': #@@REMOVED@@
- try: #@@REMOVED@@
- from fake_time import time, strftime, gmtime #@@REMOVED@@
- except ImportError: #@@REMOVED@@
- pass #@@REMOVED@@
+
+import isc.log
+from isc.log_messages.stats_messages import *
+
+isc.log.init("b10-stats")
+logger = isc.log.Logger("stats")
+
+# Some constants for debug levels, these should be removed when we
+# have #1074
+DBG_STATS_MESSAGING = 30
# for setproctitle
import isc.util.process
@@ -39,13 +43,15 @@ isc.util.process.rename()
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
+ BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
- SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+ BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
+ BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
+SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
class Singleton(type):
"""
@@ -147,9 +153,8 @@ class SessionSubject(Subject, metaclass=Singleton):
"""
A concrete subject class which creates CC session object
"""
- def __init__(self, session=None, verbose=False):
+ def __init__(self, session=None):
Subject.__init__(self)
- self.verbose = verbose
self.session=session
self.running = False
@@ -169,9 +174,8 @@ class CCSessionListener(Listener):
A concrete listener class which creates SessionSubject object and
ModuleCCSession object
"""
- def __init__(self, subject, verbose=False):
+ def __init__(self, subject):
Listener.__init__(self, subject)
- self.verbose = verbose
self.session = subject.session
self.boot_time = get_datetime()
@@ -184,8 +188,7 @@ class CCSessionListener(Listener):
self.session = self.subject.session = self.cc_session._session
# initialize internal data
- self.config_spec = self.cc_session.get_module_spec().get_config_spec()
- self.stats_spec = self.config_spec
+ self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
self.stats_data = self.initialize_data(self.stats_spec)
# add event handler invoked via SessionSubject object
@@ -208,8 +211,15 @@ class CCSessionListener(Listener):
kwargs = self.initialize_data(cmd["command_args"])
self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
except AttributeError as ae:
- sys.stderr.write("[b10-stats] Caught undefined command while parsing spec file: "
- +str(cmd["command_name"])+"\n")
+ logger.error(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+
+ def _update_stats_data(self, args):
+ # 'args' must be dictionary type
+ if isinstance(args, dict) and isinstance(args.get('stats_data'), dict):
+ self.stats_data.update(args['stats_data'])
+
+ # overwrite "stats.LastUpdateTime"
+ self.stats_data['stats.last_update_time'] = get_datetime()
def start(self):
"""
@@ -222,11 +232,17 @@ class CCSessionListener(Listener):
self.stats_data['stats.lname'] = self.session.lname
self.cc_session.start()
# request Bob to send statistics data
- if self.verbose:
- sys.stdout.write("[b10-stats] request Bob to send statistics data\n")
- cmd = isc.config.ccsession.create_command("sendstats", None)
+ logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
+ cmd = isc.config.ccsession.create_command("getstats", None)
seq = self.session.group_sendmsg(cmd, 'Boss')
- self.session.group_recvmsg(True, seq)
+ try:
+ answer, env = self.session.group_recvmsg(False, seq)
+ if answer:
+ rcode, arg = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ self._update_stats_data(arg)
+ except isc.cc.session.SessionTimeout:
+ pass
def stop(self):
"""
@@ -244,8 +260,8 @@ class CCSessionListener(Listener):
"""
handle a configure from the cc channel
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] newconfig received: "+str(new_config)+"\n")
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
+ new_config)
# do nothing currently
return create_answer(0)
@@ -267,8 +283,7 @@ class CCSessionListener(Listener):
"""
handle shutdown command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'shutdown' command received\n")
+ logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
self.subject.running = False
return create_answer(0)
@@ -276,28 +291,21 @@ class CCSessionListener(Listener):
"""
handle set command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'set' command received, args: "+str(args)+"\n")
-
- # 'args' must be dictionary type
- self.stats_data.update(args['stats_data'])
-
- # overwrite "stats.LastUpdateTime"
- self.stats_data['stats.last_update_time'] = get_datetime()
-
+ self._update_stats_data(args)
return create_answer(0)
def command_remove(self, args, stats_item_name=''):
"""
handle remove command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'remove' command received, args: "+str(args)+"\n")
# 'args' must be dictionary type
if args and args['stats_item_name'] in self.stats_data:
stats_item_name = args['stats_item_name']
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_REMOVE_COMMAND,
+ stats_item_name)
+
# just remove one item
self.stats_data.pop(stats_item_name)
@@ -307,8 +315,6 @@ class CCSessionListener(Listener):
"""
handle show command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'show' command received, args: "+str(args)+"\n")
# always overwrite 'report_time' and 'stats.timestamp'
# if "show" command invoked
@@ -318,16 +324,21 @@ class CCSessionListener(Listener):
# if with args
if args and args['stats_item_name'] in self.stats_data:
stats_item_name = args['stats_item_name']
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_NAME_COMMAND,
+ stats_item_name)
return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_ALL_COMMAND)
return create_answer(0, self.stats_data)
def command_reset(self, args):
"""
handle reset command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'reset' command received\n")
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_RESET_COMMAND)
# re-initialize internal variables
self.stats_data = self.initialize_data(self.stats_spec)
@@ -344,8 +355,7 @@ class CCSessionListener(Listener):
"""
handle status command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'status' command received\n")
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
# just return "I'm alive."
return create_answer(0, "I'm alive.")
@@ -353,9 +363,7 @@ class CCSessionListener(Listener):
"""
handle an unknown command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] Unknown command received: '"
- + str(command) + "'\n")
+ logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
return create_answer(1, "Unknown command: '"+str(command)+"'")
@@ -402,20 +410,21 @@ def main(session=None):
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="display more about what is going on")
(options, args) = parser.parse_args()
- subject = SessionSubject(session=session, verbose=options.verbose)
- listener = CCSessionListener(subject, verbose=options.verbose)
+ if options.verbose:
+ isc.log.init("b10-stats", "DEBUG", 99)
+ subject = SessionSubject(session=session)
+ listener = CCSessionListener(subject)
subject.start()
while subject.running:
subject.check()
subject.stop()
- except OptionValueError:
- sys.stderr.write("[b10-stats] Error parsing options\n")
+ except OptionValueError as ove:
+ logger.fatal(STATS_BAD_OPTION_VALUE, ove)
except SessionError as se:
- sys.stderr.write("[b10-stats] Error creating Stats module, "
- + "is the command channel daemon running?\n")
+ logger.fatal(STATS_CC_SESSION_ERROR, se)
except KeyboardInterrupt as kie:
- sys.stderr.write("[b10-stats] Interrupted, exiting\n")
+ logger.info(STATS_STOPPED_BY_KEYBOARD)
if __name__ == "__main__":
main()
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
new file mode 100644
index 0000000..635eb48
--- /dev/null
+++ b/src/bin/stats/stats.spec
@@ -0,0 +1,106 @@
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "status",
+ "command_description": "identify whether stats module is alive or not",
+ "command_args": []
+ },
+ {
+ "command_name": "show",
+ "command_description": "show the specified/all statistics data",
+ "command_args": [
+ {
+ "item_name": "stats_item_name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": ""
+ }
+ ]
+ },
+ {
+ "command_name": "set",
+ "command_description": "set the value of specified name in statistics data",
+ "command_args": [
+ {
+ "item_name": "stats_data",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": []
+ }
+ ]
+ },
+ {
+ "command_name": "remove",
+ "command_description": "remove the specified name from statistics data",
+ "command_args": [
+ {
+ "item_name": "stats_item_name",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ }
+ ]
+ },
+ {
+ "command_name": "reset",
+ "command_description": "reset all statistics data to default values except for several constant names",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats module",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "report_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Report time",
+ "item_description": "A date time when stats module reports",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "last_update_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Last update time",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "timestamp",
+ "item_type": "real",
+ "item_optional": false,
+ "item_default": 0.0,
+ "item_title": "Timestamp",
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+ },
+ {
+ "item_name": "lname",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "A localname of stats module given via CC protocol"
+ }
+ ]
+ }
+}
diff --git a/src/bin/stats/stats.spec.in b/src/bin/stats/stats.spec.in
deleted file mode 100644
index 4d42ebf..0000000
--- a/src/bin/stats/stats.spec.in
+++ /dev/null
@@ -1,140 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [
- {
- "item_name": "report_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Report time",
- "item_description": "A date time when stats module reports",
- "item_format": "date-time"
- },
- {
- "item_name": "bind10.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "bind10.BootTime",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.BootTime",
- "item_description": "A date time when the stats module starts initially or when the stats module restarts",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.start_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.StartTime",
- "item_description": "A date time when the stats module starts collecting data or resetting values last time",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.last_update_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.LastUpdateTime",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.timestamp",
- "item_type": "real",
- "item_optional": false,
- "item_default": 0.0,
- "item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
- "item_format": "second"
- },
- {
- "item_name": "stats.lname",
- "item_type": "string",
- "item_optional": false,
- "item_default": "",
- "item_title": "stats.LocalName",
- "item_description": "A localname of stats module given via CC protocol"
- },
- {
- "item_name": "auth.queries.tcp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.tcp",
- "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
- },
- {
- "item_name": "auth.queries.udp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.udp",
- "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
- }
- ],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "show",
- "command_description": "show the specified/all statistics data",
- "command_args": [
- {
- "item_name": "stats_item_name",
- "item_type": "string",
- "item_optional": true,
- "item_default": ""
- }
- ]
- },
- {
- "command_name": "set",
- "command_description": "set the value of specified name in statistics data",
- "command_args": [
- {
- "item_name": "stats_data",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": []
- }
- ]
- },
- {
- "command_name": "remove",
- "command_description": "remove the specified name from statistics data",
- "command_args": [
- {
- "item_name": "stats_item_name",
- "item_type": "string",
- "item_optional": false,
- "item_default": ""
- }
- ]
- },
- {
- "command_name": "reset",
- "command_description": "reset all statistics data to default values except for several constant names",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats module",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100644
new mode 100755
index dd9220e..6be6adf
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -34,6 +34,17 @@ import isc.cc
import isc.config
import isc.util.process
+import isc.log
+from isc.log_messages.stats_httpd_messages import *
+
+isc.log.init("b10-stats-httpd")
+logger = isc.log.Logger("stats-httpd")
+
+# Some constants for debug levels, these should be removed when we
+# have #1074
+DBG_STATHTTPD_INIT = 10
+DBG_STATHTTPD_MESSAGING = 30
+
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
@@ -46,7 +57,7 @@ else:
BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-STATS_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
+SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -98,9 +109,7 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
return None
except StatsHttpdError as err:
self.send_error(500)
- if self.server.verbose:
- self.server.log_writer(
- "[b10-stats-httpd] %s\n" % err)
+ logger.error(STATHTTPD_SERVER_ERROR, err)
return None
else:
self.send_response(200)
@@ -109,15 +118,6 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
self.end_headers()
return body
- def log_message(self, format, *args):
- """Change the default log format"""
- if self.server.verbose:
- self.server.log_writer(
- "[b10-stats-httpd] %s - - [%s] %s\n" %
- (self.address_string(),
- self.log_date_time_string(),
- format%args))
-
class HttpServerError(Exception):
"""Exception class for HttpServer class. It is intended to be
passed from the HttpServer object to the StatsHttpd object."""
@@ -134,13 +134,12 @@ class HttpServer(http.server.HTTPServer):
sys.stderr.write. They are intended to be referred by HttpHandler
object."""
def __init__(self, server_address, handler,
- xml_handler, xsd_handler, xsl_handler, log_writer, verbose=False):
+ xml_handler, xsd_handler, xsl_handler, log_writer):
self.server_address = server_address
self.xml_handler = xml_handler
self.xsd_handler = xsd_handler
self.xsl_handler = xsl_handler
self.log_writer = log_writer
- self.verbose = verbose
http.server.HTTPServer.__init__(self, server_address, handler)
class StatsHttpdError(Exception):
@@ -154,8 +153,7 @@ class StatsHttpd:
statistics module. It handles HTTP requests, and command channel
and config channel CC session. It uses select.select function
while waiting for clients requests."""
- def __init__(self, verbose=False):
- self.verbose = verbose
+ def __init__(self):
self.running = False
self.poll_intval = 0.5
self.write_log = sys.stderr.write
@@ -169,13 +167,12 @@ class StatsHttpd:
def open_mccs(self):
"""Opens a ModuleCCSession object"""
# create ModuleCCSession
- if self.verbose:
- self.write_log("[b10-stats-httpd] Starting CC Session\n")
+ logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_STARTING_CC_SESSION)
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
# read spec file of stats module and subscribe 'Stats'
- self.stats_module_spec = isc.config.module_spec_from_file(STATS_SPECFILE_LOCATION)
+ self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
self.stats_config_spec = self.stats_module_spec.get_config_spec()
self.stats_module_name = self.stats_module_spec.get_module_name()
@@ -183,8 +180,8 @@ class StatsHttpd:
"""Closes a ModuleCCSession object"""
if self.mccs is None:
return
- if self.verbose:
- self.write_log("[b10-stats-httpd] Closing CC Session\n")
+
+ logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_CLOSING_CC_SESSION)
self.mccs.close()
self.mccs = None
@@ -221,7 +218,7 @@ class StatsHttpd:
httpd = HttpServer(
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
- self.write_log, self.verbose)
+ self.write_log)
except (socket.gaierror, socket.error,
OverflowError, TypeError) as err:
# try IPv4 next
@@ -233,10 +230,8 @@ class StatsHttpd:
(server_address[0], server_address[1],
err.__class__.__name__, err))
else:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Started on address %s, port %s\n" %
- server_address)
+ logger.info(STATHTTPD_STARTED, server_address[0],
+ server_address[1])
return httpd
def close_httpd(self):
@@ -244,11 +239,8 @@ class StatsHttpd:
if len(self.httpd) == 0:
return
for ht in self.httpd:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Closing address %s, port %s\n" %
- (ht.server_address[0], ht.server_address[1])
- )
+ logger.info(STATHTTPD_CLOSING, ht.server_address[0],
+ ht.server_address[1])
ht.server_close()
self.httpd = []
@@ -285,8 +277,7 @@ class StatsHttpd:
def stop(self):
"""Stops the running StatsHttpd objects. Closes CC session and
HTTP handling sockets"""
- if self.verbose:
- self.write_log("[b10-stats-httpd] Shutting down\n")
+ logger.info(STATHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
@@ -303,13 +294,11 @@ class StatsHttpd:
def config_handler(self, new_config):
"""Config handler for the ModuleCCSession object. It resets
addresses and ports to listen HTTP requests on."""
- if self.verbose:
- self.write_log("[b10-stats-httpd] Loading config : %s\n" % str(new_config))
+ logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
+ new_config)
for key in new_config.keys():
- if key not in DEFAULT_CONFIG:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Unknown known config: %s" % key)
+ if key not in DEFAULT_CONFIG and key != "version":
+ logger.error(STATHTTPD_UNKNOWN_CONFIG_ITEM, key)
return isc.config.ccsession.create_answer(
1, "Unknown known config: %s" % key)
# backup old config
@@ -319,9 +308,7 @@ class StatsHttpd:
try:
self.open_httpd()
except HttpServerError as err:
- if self.verbose:
- self.write_log("[b10-stats-httpd] %s\n" % err)
- self.write_log("[b10-stats-httpd] Restoring old config\n")
+ logger.error(STATHTTPD_SERVER_ERROR, err)
# restore old config
self.config_handler(old_config)
return isc.config.ccsession.create_answer(
@@ -333,19 +320,19 @@ class StatsHttpd:
"""Command handler for the ModuleCCSesson object. It handles
"status" and "shutdown" commands."""
if command == "status":
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received 'status' command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_STATUS_COMMAND)
return isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")")
elif command == "shutdown":
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received 'shutdown' command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
return isc.config.ccsession.create_answer(
0, "Stats Httpd is shutting down.")
else:
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received unknown command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
return isc.config.ccsession.create_answer(
1, "Unknown command: " + str(command))
@@ -398,7 +385,14 @@ class StatsHttpd:
annotation.append(documentation)
element.append(annotation)
xsd_root.append(element)
- xsd_string = xml.etree.ElementTree.tostring(xsd_root)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+# plain string (code below assumes it).
+ xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
+ encoding='us-ascii')
self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
xsd_string=xsd_string,
xsd_namespace=XSD_NAMESPACE
@@ -423,7 +417,14 @@ class StatsHttpd:
tr.append(td1)
tr.append(td2)
xsd_root.append(tr)
- xsl_string = xml.etree.ElementTree.tostring(xsd_root)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+# plain string (code below assumes it).
+ xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
+ encoding='us-ascii')
self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
xsl_string=xsl_string,
xsd_namespace=XSD_NAMESPACE)
@@ -437,8 +438,15 @@ class StatsHttpd:
(k, v) = (str(k), str(v))
elem = xml.etree.ElementTree.Element(k)
elem.text = v
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+# plain string (code below assumes it).
xml_list.append(
- xml.etree.ElementTree.tostring(elem))
+ str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
+ encoding='us-ascii'))
xml_string = "".join(xml_list)
self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
xml_string=xml_string,
@@ -472,14 +480,18 @@ if __name__ == "__main__":
"-v", "--verbose", dest="verbose", action="store_true",
help="display more about what is going on")
(options, args) = parser.parse_args()
- stats_httpd = StatsHttpd(verbose=options.verbose)
+ if options.verbose:
+ isc.log.init("b10-stats-httpd", "DEBUG", 99)
+ stats_httpd = StatsHttpd()
stats_httpd.start()
- except OptionValueError:
- sys.exit("[b10-stats-httpd] Error parsing options")
+ except OptionValueError as ove:
+ logger.fatal(STATHTTPD_BAD_OPTION_VALUE, ove)
+ sys.exit(1)
except isc.cc.session.SessionError as se:
- sys.exit("[b10-stats-httpd] Error creating module, "
- + "is the command channel daemon running?")
+ logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
+ sys.exit(1)
except HttpServerError as hse:
- sys.exit("[b10-stats-httpd] %s" % hse)
+ logger.fatal(STATHTTPD_START_SERVER_ERROR, hse)
+ sys.exit(1)
except KeyboardInterrupt as kie:
- sys.exit("[b10-stats-httpd] Interrupted, exiting")
+ logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
new file mode 100644
index 0000000..0e984dc
--- /dev/null
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -0,0 +1,92 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the stats_httpd_messages python module.
+
+% STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+
+% STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+
+% STATHTTPD_CLOSING_CC_SESSION stopping cc session
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+
+% STATHTTPD_CLOSING closing %1#%2
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+
+% STATHTTPD_HANDLE_CONFIG reading configuration: %1
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+
+% STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+
+% STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+
+% STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+
+% STATHTTPD_SERVER_ERROR HTTP server error: %1
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistics requests.
+
+% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+
+% STATHTTPD_SHUTDOWN shutting down
+The stats-httpd daemon is shutting down.
+
+% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+
+% STATHTTPD_STARTED listening on %1#%2
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+
+% STATHTTPD_STARTING_CC_SESSION starting cc session
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+
+% STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+
+% STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
new file mode 100644
index 0000000..9ad07cf
--- /dev/null
+++ b/src/bin/stats/stats_messages.mes
@@ -0,0 +1,75 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the stats_messages python module.
+
+% STATS_BAD_OPTION_VALUE bad command line argument: %1
+The stats module was called with a bad command-line argument and will
+not start.
+
+% STATS_CC_SESSION_ERROR error connecting to message bus: %1
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+
+% STATS_RECEIVED_NEW_CONFIG received new configuration: %1
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+
+% STATS_RECEIVED_REMOVE_COMMAND received command to remove %1
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+
+% STATS_RECEIVED_RESET_COMMAND received command to reset all statistics
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+
+% STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
+The stats module received a command to show all statistics that it has
+collected.
+
+% STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1
+The stats module received a command to show the statistics that it has
+collected for the given item.
+
+% STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+A shutdown command was sent to the stats module and it will now shut down.
+
+% STATS_RECEIVED_STATUS_COMMAND received command to return status
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+
+% STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+
+% STATS_SEND_REQUEST_BOSS requesting boss to send statistics
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+
+% STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+
+% STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+
+
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index 5a13277..ee79de2 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -4,6 +4,13 @@ PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -13,7 +20,13 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
B10_FROM_SOURCE=$(abs_top_srcdir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 07999ea..6d72dc2 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -57,13 +57,9 @@ class TestHttpHandler(unittest.TestCase):
"""Tests for HttpHandler class"""
def setUp(self):
- self.verbose = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
+ self.stats_httpd = stats_httpd.StatsHttpd()
self.assertTrue(type(self.stats_httpd.httpd) is list)
self.httpd = self.stats_httpd.httpd
- for ht in self.httpd:
- self.assertTrue(ht.verbose)
- self.stats_httpd.cc_session.verbose = False
def test_do_GET(self):
for ht in self.httpd:
@@ -155,21 +151,6 @@ class TestHttpHandler(unittest.TestCase):
handler.do_HEAD()
self.assertEqual(handler.response.code, 404)
- def test_log_message(self):
- for ht in self.httpd:
- self._test_log_message(ht._handler)
-
- def _test_log_message(self, handler):
- # switch write_log function
- handler.server.log_writer = handler.response._write_log
- log_message = 'ABCDEFG'
- handler.log_message("%s", log_message)
- self.assertEqual(handler.response.log,
- "[b10-stats-httpd] %s - - [%s] %s\n" %
- (handler.address_string(),
- handler.log_date_time_string(),
- log_message))
-
class TestHttpServerError(unittest.TestCase):
"""Tests for HttpServerError exception"""
@@ -183,12 +164,9 @@ class TestHttpServer(unittest.TestCase):
"""Tests for HttpServer class"""
def test_httpserver(self):
- self.verbose = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
+ self.stats_httpd = stats_httpd.StatsHttpd()
for ht in self.stats_httpd.httpd:
self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
- self.assertEqual(ht.verbose, self.verbose)
self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
@@ -209,17 +187,14 @@ class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
def setUp(self):
- self.verbose = True
fake_socket._CLOSED = False
fake_socket.has_ipv6 = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
+ self.stats_httpd = stats_httpd.StatsHttpd()
def tearDown(self):
self.stats_httpd.stop()
def test_init(self):
- self.assertTrue(self.stats_httpd.verbose)
self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
id(self.stats_httpd.mccs.get_socket()))
@@ -317,8 +292,7 @@ class TestStatsHttpd(unittest.TestCase):
self.stats_httpd.cc_session.group_sendmsg(
{ 'command': [ "shutdown" ] }, "StatsHttpd")
self.stats_httpd.start()
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
+ self.stats_httpd = stats_httpd.StatsHttpd()
self.assertRaises(
fake_select.error, self.stats_httpd.start)
@@ -428,6 +402,95 @@ class TestStatsHttpd(unittest.TestCase):
)
self.assertEqual(ret, 1)
+ def test_xml_handler(self):
+ orig_get_stats_data = stats_httpd.StatsHttpd.get_stats_data
+ stats_httpd.StatsHttpd.get_stats_data = lambda x: {'foo':'bar'}
+ xml_body1 = stats_httpd.StatsHttpd().open_template(
+ stats_httpd.XML_TEMPLATE_LOCATION).substitute(
+ xml_string='<foo>bar</foo>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE,
+ xsd_url_path=stats_httpd.XSD_URL_PATH,
+ xsl_url_path=stats_httpd.XSL_URL_PATH)
+ xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ self.assertEqual(type(xml_body1), str)
+ self.assertEqual(type(xml_body2), str)
+ self.assertEqual(xml_body1, xml_body2)
+ stats_httpd.StatsHttpd.get_stats_data = lambda x: {'bar':'foo'}
+ xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ self.assertNotEqual(xml_body1, xml_body2)
+ stats_httpd.StatsHttpd.get_stats_data = orig_get_stats_data
+
+ def test_xsd_handler(self):
+ orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
+ stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ xsd_body1 = stats_httpd.StatsHttpd().open_template(
+ stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
+ xsd_string='<all>' \
+ + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
+ + '<annotation><appinfo>Foo</appinfo>' \
+ + '<documentation>foo is bar</documentation>' \
+ + '</annotation></element></all>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ self.assertEqual(type(xsd_body1), str)
+ self.assertEqual(type(xsd_body2), str)
+ self.assertEqual(xsd_body1, xsd_body2)
+ stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ self.assertNotEqual(xsd_body1, xsd_body2)
+ stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
+
+ def test_xsl_handler(self):
+ orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
+ stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ xsl_body1 = stats_httpd.StatsHttpd().open_template(
+ stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
+ xsl_string='<xsl:template match="*"><tr>' \
+ + '<td class="title" title="foo is bar">Foo</td>' \
+ + '<td><xsl:value-of select="foo" /></td>' \
+ + '</tr></xsl:template>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ self.assertEqual(type(xsl_body1), str)
+ self.assertEqual(type(xsl_body2), str)
+ self.assertEqual(xsl_body1, xsl_body2)
+ stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ self.assertNotEqual(xsl_body1, xsl_body2)
+ stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
+
def test_for_without_B10_FROM_SOURCE(self):
# just lets it go through the code without B10_FROM_SOURCE env
# variable
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index 818b67a..2fb4ab5 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -23,22 +23,26 @@ import unittest
import imp
from isc.cc.session import Session, SessionError
from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
+from fake_time import time, strftime, gmtime
import stats
+stats.time = time
+stats.strftime = strftime
+stats.gmtime = gmtime
from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-# setting Constant
-if sys.path[0] == '':
- TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
+if "B10_FROM_SOURCE" in os.environ:
+ TEST_SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
+ "/src/bin/stats/tests/testdata/stats_test.spec"
else:
- TEST_SPECFILE_LOCATION = sys.path[0] + "/testdata/stats_test.spec"
+ TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
class TestStats(unittest.TestCase):
def setUp(self):
self.session = Session()
- self.subject = SessionSubject(session=self.session, verbose=True)
- self.listener = CCSessionListener(self.subject, verbose=True)
+ self.subject = SessionSubject(session=self.session)
+ self.listener = CCSessionListener(self.subject)
self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
self.stats_data = {
@@ -55,6 +59,7 @@ class TestStats(unittest.TestCase):
# check starting
self.assertFalse(self.subject.running)
self.subject.start()
+ self.assertEqual(len(self.session.old_message_queue), 1)
self.assertTrue(self.subject.running)
self.assertEqual(len(self.session.message_queue), 0)
self.assertEqual(self.module_name, 'Stats')
@@ -505,16 +510,16 @@ class TestStats(unittest.TestCase):
def test_for_boss(self):
last_queue = self.session.old_message_queue.pop()
self.assertEqual(
- last_queue.msg, {'command': ['sendstats']})
+ last_queue.msg, {'command': ['getstats']})
self.assertEqual(
last_queue.env['group'], 'Boss')
class TestStats2(unittest.TestCase):
def setUp(self):
- self.session = Session(verbose=True)
- self.subject = SessionSubject(session=self.session, verbose=True)
- self.listener = CCSessionListener(self.subject, verbose=True)
+ self.session = Session()
+ self.subject = SessionSubject(session=self.session)
+ self.listener = CCSessionListener(self.subject)
self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
# check starting
self.assertFalse(self.subject.running)
@@ -540,13 +545,18 @@ class TestStats2(unittest.TestCase):
os.environ["B10_FROM_SOURCE"] + os.sep + \
"src" + os.sep + "bin" + os.sep + "stats" + \
os.sep + "stats.spec")
+ self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
+ os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats" + \
+ os.sep + "stats-schema.spec")
imp.reload(stats)
# change path of SPECFILE_LOCATION
stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
+ stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
- self.subject = stats.SessionSubject(session=self.session, verbose=True)
+ self.subject = stats.SessionSubject(session=self.session)
self.session = self.subject.session
- self.listener = stats.CCSessionListener(self.subject, verbose=True)
+ self.listener = stats.CCSessionListener(self.subject)
self.assertEqual(self.listener.stats_spec, [])
self.assertEqual(self.listener.stats_data, {})
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
index 879e8a8..79263a9 100644
--- a/src/bin/stats/tests/http/Makefile.am
+++ b/src/bin/stats/tests/http/Makefile.am
@@ -1,2 +1,6 @@
EXTRA_DIST = __init__.py server.py
CLEANFILES = __init__.pyc server.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
index 059107a..bdfa1eb 100644
--- a/src/bin/stats/tests/isc/Makefile.am
+++ b/src/bin/stats/tests/isc/Makefile.am
@@ -1,3 +1,8 @@
-SUBDIRS = cc config util
+SUBDIRS = cc config util log log_messages
EXTRA_DIST = __init__.py
CLEANFILES = __init__.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
index ccf4dde..67323b5 100644
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ b/src/bin/stats/tests/isc/cc/Makefile.am
@@ -1,2 +1,7 @@
EXTRA_DIST = __init__.py session.py
CLEANFILES = __init__.pyc session.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
index e16d6a9..e18a695 100644
--- a/src/bin/stats/tests/isc/cc/session.py
+++ b/src/bin/stats/tests/isc/cc/session.py
@@ -115,8 +115,16 @@ class Session:
def group_recvmsg(self, nonblock=True, seq=0):
que = self.dequeue()
+ if que.msg != None:
+ cmd = que.msg.get("command")
+ if cmd and cmd[0] == 'getstats':
+ # Create answer for command 'getstats'
+ retdata = { "stats_data": {
+ 'bind10.boot_time' : "1970-01-01T00:00:00Z"
+ }}
+ return {'result': [0, retdata]}, que.env
return que.msg, que.env
-
+
def group_reply(self, routing, msg):
return self.enqueue(msg=msg, env={
"type": "send",
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
index 5b0379a..ffbecda 100644
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ b/src/bin/stats/tests/isc/config/Makefile.am
@@ -1,2 +1,7 @@
EXTRA_DIST = __init__.py ccsession.py
CLEANFILES = __init__.pyc ccsession.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
index a4e9c37..50f7c1b 100644
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ b/src/bin/stats/tests/isc/config/ccsession.py
@@ -23,6 +23,7 @@ external module.
import json
import os
+import time
from isc.cc.session import Session
COMMAND_CONFIG_UPDATE = "config_update"
@@ -72,6 +73,9 @@ class ModuleSpecError(Exception):
class ModuleSpec:
def __init__(self, module_spec, check = True):
+ # check only confi_data for testing
+ if check and "config_data" in module_spec:
+ _check_config_spec(module_spec["config_data"])
self._module_spec = module_spec
def get_config_spec(self):
@@ -83,6 +87,91 @@ class ModuleSpec:
def get_module_name(self):
return self._module_spec['module_name']
+def _check_config_spec(config_data):
+ # config data is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the configuration part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(config_data) != list:
+ raise ModuleSpecError("config_data is of type " + str(type(config_data)) + ", not a list of items")
+ for config_item in config_data:
+ _check_item_spec(config_item)
+
+def _check_item_spec(config_item):
+ """Checks the dict that defines one config item
+ (i.e. containing "item_name", "item_type", etc.
+ Raises a ModuleSpecError if there is an error"""
+ if type(config_item) != dict:
+ raise ModuleSpecError("item spec not a dict")
+ if "item_name" not in config_item:
+ raise ModuleSpecError("no item_name in config item")
+ if type(config_item["item_name"]) != str:
+ raise ModuleSpecError("item_name is not a string: " + str(config_item["item_name"]))
+ item_name = config_item["item_name"]
+ if "item_type" not in config_item:
+ raise ModuleSpecError("no item_type in config item")
+ item_type = config_item["item_type"]
+ if type(item_type) != str:
+ raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
+ if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+ raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
+ if "item_optional" in config_item:
+ if type(config_item["item_optional"]) != bool:
+ raise ModuleSpecError("item_default in " + item_name + " is not a boolean")
+ if not config_item["item_optional"] and "item_default" not in config_item:
+ raise ModuleSpecError("no default value for non-optional item " + item_name)
+ else:
+ raise ModuleSpecError("item_optional not in item " + item_name)
+ if "item_default" in config_item:
+ item_default = config_item["item_default"]
+ if (item_type == "integer" and type(item_default) != int) or \
+ (item_type == "real" and type(item_default) != float) or \
+ (item_type == "boolean" and type(item_default) != bool) or \
+ (item_type == "string" and type(item_default) != str) or \
+ (item_type == "list" and type(item_default) != list) or \
+ (item_type == "map" and type(item_default) != dict):
+ raise ModuleSpecError("Wrong type for item_default in " + item_name)
+ # TODO: once we have check_type, run the item default through that with the list|map_item_spec
+ if item_type == "list":
+ if "list_item_spec" not in config_item:
+ raise ModuleSpecError("no list_item_spec in list item " + item_name)
+ if type(config_item["list_item_spec"]) != dict:
+ raise ModuleSpecError("list_item_spec in " + item_name + " is not a dict")
+ _check_item_spec(config_item["list_item_spec"])
+ if item_type == "map":
+ if "map_item_spec" not in config_item:
+ raise ModuleSpecError("no map_item_sepc in map item " + item_name)
+ if type(config_item["map_item_spec"]) != list:
+ raise ModuleSpecError("map_item_spec in " + item_name + " is not a list")
+ for map_item in config_item["map_item_spec"]:
+ if type(map_item) != dict:
+ raise ModuleSpecError("map_item_spec element is not a dict")
+ _check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+
+def _check_format(value, format_name):
+ """Check if specified value and format are correct. Return True if
+ is is correct."""
+ # TODO: should be added other format types if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ time.strptime(value, time_formats[fmt])
+ return True
+ except (ValueError, TypeError):
+ break
+ return False
+
class ModuleCCSessionError(Exception):
pass
diff --git a/src/bin/stats/tests/isc/log/Makefile.am b/src/bin/stats/tests/isc/log/Makefile.am
new file mode 100644
index 0000000..457b9de
--- /dev/null
+++ b/src/bin/stats/tests/isc/log/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST = __init__.py
+CLEANFILES = __init__.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log/__init__.py b/src/bin/stats/tests/isc/log/__init__.py
new file mode 100644
index 0000000..641cf79
--- /dev/null
+++ b/src/bin/stats/tests/isc/log/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed. The log.so is installed into the right place.
+# It is only to find it in the .libs directory when we run as a test or
+# from the build directory.
+# But as nobody gives us the builddir explicitly (and we can't use generation
+# from .in file, as it would put us into the builddir and we wouldn't be found)
+# we guess from current directory. Any idea for something better? This should
+# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
+# Should we look there? Or define something in bind10_config?
+
+import os
+import sys
+
+for base in sys.path[:]:
+ loglibdir = os.path.join(base, 'isc/log/.libs')
+ if os.path.exists(loglibdir):
+ sys.path.insert(0, loglibdir)
+
+from log import *
diff --git a/src/bin/stats/tests/isc/log_messages/Makefile.am b/src/bin/stats/tests/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..90b4499
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST = __init__.py stats_messages.py stats_httpd_messages.py
+CLEANFILES = __init__.pyc stats_messages.pyc stats_httpd_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log_messages/__init__.py b/src/bin/stats/tests/isc/log_messages/__init__.py
new file mode 100644
index 0000000..58e99e3
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+This is a fake package that acts as a forwarder to the real package.
+'''
diff --git a/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py b/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..0adb0f0
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from work.stats_httpd_messages import *
diff --git a/src/bin/stats/tests/isc/log_messages/stats_messages.py b/src/bin/stats/tests/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..c05a6a8
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/stats_messages.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from work.stats_messages import *
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
index b09fdee..9c74354 100644
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ b/src/bin/stats/tests/isc/util/Makefile.am
@@ -1,2 +1,7 @@
EXTRA_DIST = __init__.py process.py
CLEANFILES = __init__.pyc process.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/stats_test.in b/src/bin/stats/tests/stats_test.in
deleted file mode 100755
index 9a95c5b..0000000
--- a/src/bin/stats/tests/stats_test.in
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2010, 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
-export PYTHON_EXEC
-
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_srcdir@/src/bin/stats:@abs_top_srcdir@/src/bin/stats/tests
-export PYTHONPATH
-
-B10_FROM_SOURCE=@abs_top_srcdir@
-export B10_FROM_SOURCE
-
-TEST_PATH=@abs_top_srcdir@/src/bin/stats/tests
-
-cd ${TEST_PATH}
-${PYTHON_EXEC} -O b10-stats_test.py $*
-${PYTHON_EXEC} -O b10-stats-httpd_test.py $*
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 4340c64..0ce992d 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -1,8 +1,16 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = process_rename_test.py
+noinst_SCRIPTS = $(PYTESTS)
# .py will be generated by configure, so we don't have to include it
# in EXTRA_DIST.
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -12,6 +20,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 4b45210..f96c023 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -38,8 +38,10 @@ class TestRename(unittest.TestCase):
Then scan them by looking at the source text
(without actually running them)
"""
- # Regexp to find all the *_SCRIPTS = something lines,
- # including line continuations (backslash and newline)
+ # Regexp to find all the *_SCRIPTS = something lines (except for
+ # noinst_SCRIPTS, which are scripts for tests), including line
+ # continuations (backslash and newline)
+ excluded_lines = re.compile(r'^(noinst_SCRIPTS.*$)', re.MULTILINE)
lines = re.compile(r'^\w+_SCRIPTS\s*=\s*((.|\\\n)*)$',
re.MULTILINE)
# Script name regular expression
@@ -53,7 +55,8 @@ class TestRename(unittest.TestCase):
if 'Makefile' in fs:
makefile = ''.join(open(os.path.join(d,
"Makefile")).readlines())
- for (var, _) in lines.findall(makefile):
+ for (var, _) in lines.findall(re.sub(excluded_lines, '',
+ makefile)):
for (script, _) in scripts.findall(var):
self.__scan(d, script, fun)
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index ee8505e..8d80b22 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -7,11 +7,16 @@ pkglibexec_SCRIPTS = b10-xfrin
b10_xfrindir = $(pkgdatadir)
b10_xfrin_DATA = xfrin.spec
-CLEANFILES = b10-xfrin xfrin.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.pyc
man_MANS = b10-xfrin.8
EXTRA_DIST = $(man_MANS) b10-xfrin.xml
-EXTRA_DIST += xfrin.spec
+EXTRA_DIST += xfrin.spec xfrin_messages.mes
if ENABLE_MAN
@@ -20,8 +25,18 @@ b10-xfrin.8: b10-xfrin.xml
endif
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py : xfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrin_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py
+b10-xfrin: xfrin.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index d0723b5..54dbe7c 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -2,12 +2,12 @@
.\" Title: b10-xfrin
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: September 8, 2010
+.\" Date: September 8, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-XFRIN" "8" "September 8, 2010" "BIND10" "BIND10"
+.TH "B10\-XFRIN" "8" "September 8, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -43,7 +43,7 @@ boss process\&. When triggered it can request and receive a zone transfer and st
.ps -1
.br
.sp
-The Y1 prototype release only supports AXFR\&. IXFR is not implemented\&.
+This prototype release only supports AXFR\&. IXFR is not implemented\&.
.sp .5v
.RE
.PP
@@ -61,14 +61,36 @@ receives its configurations from
.PP
The configurable settings are:
.PP
-\fImaster_addr\fR
-The default is 127\&.0\&.0\&.1\&.
+\fItransfers_in\fR
+defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
.PP
+
+\fIzones\fR
+is a list of zones known to the
+\fBb10\-xfrin\fR
+daemon\&. The list items are:
+\fIname\fR
+(the zone name),
+\fIclass\fR
+(defaults to
+\(lqIN\(rq),
+\fImaster_addr\fR
+(the zone master to transfer from),
\fImaster_port\fR
-The default is 53\&.
-.PP
-\fItransfers\-in\fR
-defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
+(defaults to 53), and
+\fItsig_key\fR
+(optional TSIG key to use)\&. The
+\fItsig_key\fR
+is specified using a full string colon\-delimited name:key:algorithm representation (e\&.g\&.
+\(lqfoo\&.example\&.org:EvABsfU2h7uofnmqaRCrhHunGsd=:hmac\-sha1\(rq)\&.
+.PP
+(The site\-wide
+\fImaster_addr\fR
+and
+\fImaster_port\fR
+configurations are deprecated; use the
+\fIzones\fR
+list configuration instead\&.)
.PP
The configuration commands are:
.PP
@@ -106,7 +128,9 @@ to define the class (defaults to
\fImaster\fR
to define the IP address of the authoritative server to transfer from, and
\fIport\fR
-to define the port number on the authoritative server (defaults to 53)\&.
+to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the value previously defined in the
+\fIzones\fR
+configuration\&.
.PP
\fBshutdown\fR
@@ -143,5 +167,5 @@ The
daemon was implemented in March 2010 by Zhang Likun of CNNIC for the ISC BIND 10 project\&.
.SH "COPYRIGHT"
.br
-Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
+Copyright \(co 2010-2011 Internet Systems Consortium, Inc. ("ISC")
.br
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index fdfe1ef..d45e15f 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>September 8, 2010</date>
+ <date>September 8, 2011</date>
</refentryinfo>
<refmeta>
@@ -36,7 +36,7 @@
<docinfo>
<copyright>
- <year>2010</year>
+ <year>2010-2011</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
@@ -62,6 +62,12 @@
the zone in a BIND 10 zone data store.
</para>
+<!-- TODO:
+xfrin only does the transfer to make it as simple as possible.
+The logic for handling transfer triggers or zone management is handled
+in separate zonemgr process.
+-->
+
<note><simpara>
This prototype release only supports AXFR. IXFR is not implemented.
</simpara></note>
@@ -86,20 +92,34 @@
The configurable settings are:
</para>
- <para><varname>master_addr</varname>
-<!-- TODO: how can there be a single setting for this? -->
- The default is 127.0.0.1.
+ <para><varname>transfers_in</varname>
+ defines the maximum number of inbound zone transfers
+ that can run concurrently. The default is 10.
</para>
- <para><varname>master_port</varname>
-<!-- TODO: what if custom is needed per zone? -->
- The default is 53.
+<!-- TODO: is name okay for master_addr or just IP? -->
+ <para>
+ <varname>zones</varname> is a list of zones known to the
+ <command>b10-xfrin</command> daemon.
+ The list items are:
+ <varname>name</varname> (the zone name),
+ <varname>class</varname> (defaults to <quote>IN</quote>),
+ <varname>master_addr</varname> (the zone master to transfer from),
+ <varname>master_port</varname> (defaults to 53), and
+ <varname>tsig_key</varname> (optional TSIG key to use).
+ The <varname>tsig_key</varname> is specified using a full string
+ colon-delimited name:key:algorithm representation (e.g.
+ <quote>foo.example.org:EvABsfU2h7uofnmqaRCrhHunGsd=:hmac-sha1</quote>).
</para>
+<!-- TODO: document this better -->
+<!-- TODO: the tsig_key format may change -->
- <para><varname>transfers-in</varname>
- defines the maximum number of inbound zone transfers
- that can run concurrently. The default is 10.
+ <para>
+ (The site-wide <varname>master_addr</varname> and
+ <varname>master_port</varname> configurations are deprecated;
+ use the <varname>zones</varname> list configuration instead.)
</para>
+<!-- NOTE: also tsig_key but not mentioning since so short lived. -->
<!-- TODO: formating -->
<para>
@@ -148,6 +168,9 @@
the authoritative server to transfer from,
and <varname>port</varname> to define the port number on the
authoritative server (defaults to 53).
+ If the address or port is not specified, it will use the
+ value previously defined in the <varname>zones</varname>
+ configuration.
</para>
<!-- TODO: later hostname for master? -->
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index d4efbc7..3d56009 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
$(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 04d04a6..05cce98 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2009 Internet Systems Consortium.
+# Copyright (C) 2009-2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -15,13 +15,18 @@
import unittest
import socket
+import io
+from isc.testutils.tsigctx_mock import MockTSIGContext
from xfrin import *
+import isc.log
#
# Commonly used (mostly constant) test parameters
#
-TEST_ZONE_NAME = "example.com"
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS_STR = 'IN'
TEST_DB_FILE = 'db_file'
TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
@@ -35,15 +40,17 @@ TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
# If some other process uses this port test will fail.
TEST_MASTER_PORT = '53535'
+TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+
soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
'master.example.com. admin.example.com ' +
'1234 3600 1800 2419200 7200')
-soa_rrset = RRset(Name(TEST_ZONE_NAME), TEST_RRCLASS, RRType.SOA(),
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
RRTTL(3600))
soa_rrset.add_rdata(soa_rdata)
-example_axfr_question = Question(Name(TEST_ZONE_NAME), TEST_RRCLASS,
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
RRType.AXFR())
-example_soa_question = Question(Name(TEST_ZONE_NAME), TEST_RRCLASS,
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
RRType.SOA())
default_questions = [example_axfr_question]
default_answers = [soa_rrset]
@@ -51,6 +58,13 @@ default_answers = [soa_rrset]
class XfrinTestException(Exception):
pass
+class MockCC():
+ def get_default_value(self, identifier):
+ if identifier == "zones/master_port":
+ return TEST_MASTER_PORT
+ if identifier == "zones/class":
+ return TEST_RRCLASS_STR
+
class MockXfrin(Xfrin):
# This is a class attribute of a callable object that specifies a non
# default behavior triggered in _cc_check_command(). Specific test methods
@@ -60,16 +74,28 @@ class MockXfrin(Xfrin):
check_command_hook = None
def _cc_setup(self):
+ self._tsig_key = None
+ self._module_cc = MockCC()
pass
def _get_db_file(self):
pass
-
+
def _cc_check_command(self):
self._shutdown_event.set()
if MockXfrin.check_command_hook:
MockXfrin.check_command_hook()
+ def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+ tsig_key, check_soa=True):
+ # store some of the arguments for verification, then call this
+ # method in the superclass
+ self.xfrin_started_master_addr = master_addrinfo[2][0]
+ self.xfrin_started_master_port = master_addrinfo[2][1]
+ return Xfrin.xfrin_start(self, zone_name, rrclass, db_file,
+ master_addrinfo, tsig_key,
+ check_soa)
+
class MockXfrinConnection(XfrinConnection):
def __init__(self, sock_map, zone_name, rrclass, db_file, shutdown_event,
master_addr):
@@ -121,10 +147,11 @@ class MockXfrinConnection(XfrinConnection):
self.response_generator()
return len(data)
- def create_response_data(self, response = True, bad_qid = False,
- rcode = Rcode.NOERROR(),
- questions = default_questions,
- answers = default_answers):
+ def create_response_data(self, response=True, bad_qid=False,
+ rcode=Rcode.NOERROR(),
+ questions=default_questions,
+ answers=default_answers,
+ tsig_ctx=None):
resp = Message(Message.RENDER)
qid = self.qid
if bad_qid:
@@ -138,7 +165,10 @@ class MockXfrinConnection(XfrinConnection):
[resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
renderer = MessageRenderer()
- resp.to_wire(renderer)
+ if tsig_ctx is not None:
+ resp.to_wire(renderer, tsig_ctx)
+ else:
+ resp.to_wire(renderer)
reply_data = struct.pack('H', socket.htons(renderer.get_length()))
reply_data += renderer.get_data()
@@ -153,20 +183,44 @@ class TestXfrinConnection(unittest.TestCase):
TEST_RRCLASS, TEST_DB_FILE,
threading.Event(),
TEST_MASTER_IPV4_ADDRINFO)
- self.axfr_after_soa = False
self.soa_response_params = {
'questions': [example_soa_question],
'bad_qid': False,
'response': True,
'rcode': Rcode.NOERROR(),
+ 'tsig': False,
'axfr_after_soa': self._create_normal_response_data
}
+ self.axfr_response_params = {
+ 'tsig_1st': None,
+ 'tsig_2nd': None
+ }
def tearDown(self):
self.conn.close()
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
+ def __create_mock_tsig(self, key, error):
+ # This helper function creates a MockTSIGContext for a given key
+ # and TSIG error to be used as a result of verify (normally faked
+ # one)
+ mock_ctx = MockTSIGContext(key)
+ mock_ctx.error = error
+ return mock_ctx
+
+ def __match_exception(self, expected_exception, expected_msg, expression):
+ # This helper method is a higher-granularity version of assertRaises().
+ # If it's not sufficient to check the exception class (e.g., when
+ # the same type of exceptions can be thrown from many places), this
+ # method can be used to check it with the exception argument.
+ try:
+ expression()
+ except expected_exception as ex:
+ self.assertEqual(str(ex), expected_msg)
+ else:
+ self.assertFalse('exception is expected, but not raised')
+
def test_close(self):
# we shouldn't be using the global asyncore map.
self.assertEqual(len(asyncore.socket_map), 0)
@@ -196,10 +250,53 @@ class TestXfrinConnection(unittest.TestCase):
RRClass.CH())
c.close()
+ def test_send_query(self):
+ def create_msg(query_type):
+ msg = Message(Message.RENDER)
+ query_id = 0x1035
+ msg.set_qid(query_id)
+ msg.set_opcode(Opcode.QUERY())
+ msg.set_rcode(Rcode.NOERROR())
+ query_question = Question(Name("example.com."), RRClass.IN(), query_type)
+ msg.add_question(query_question)
+ return msg
+
+ def message_has_tsig(data):
+ # a simple check if the actual data contains a TSIG RR.
+ # At our level this simple check should suffice; other detailed
+ # tests regarding the TSIG protocol are done in pydnspp.
+ msg = Message(Message.PARSE)
+ msg.from_wire(data)
+ return msg.get_tsig_record() is not None
+
+ self.conn._create_query = create_msg
+ # soa request
+ self.conn._send_query(RRType.SOA())
+ self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x06\x00\x01')
+ # axfr request
+ self.conn._send_query(RRType.AXFR())
+ self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+
+ # soa request with tsig
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._send_query(RRType.SOA())
+ self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
+
+ # axfr request with tsig
+ self.conn._send_query(RRType.AXFR())
+ self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
+
def test_response_with_invalid_msg(self):
self.conn.reply_data = b'aaaxxxx'
self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+ def test_response_with_tsigfail(self):
+ self.conn._tsig_key = TSIG_KEY
+ # server tsig check fail, return with RCODE 9 (NOTAUTH)
+ self.conn._send_query(RRType.SOA())
+ self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+ self.assertRaises(XfrinException, self._handle_xfrin_response)
+
def test_response_without_end_soa(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data()
@@ -210,6 +307,31 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.reply_data = self.conn.create_response_data(bad_qid = True)
self.assertRaises(XfrinException, self._handle_xfrin_response)
+ def test_response_error_code_bad_sig(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(
+ rcode=Rcode.SERVFAIL())
+ # xfrin should check TSIG before other part of incoming message
+ # validate log message for XfrinException
+ self.__match_exception(XfrinException,
+ "TSIG verify fail: BADSIG",
+ self._handle_xfrin_response)
+
+ def test_response_bad_qid_bad_key(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_KEY)
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
+ # xfrin should check TSIG before other part of incoming message
+ # validate log message for XfrinException
+ self.__match_exception(XfrinException,
+ "TSIG verify fail: BADKEY",
+ self._handle_xfrin_response)
+
def test_response_non_response(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(response = False)
@@ -254,6 +376,18 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ def test_soacheck_bad_qid_bad_sig(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.soa_response_params['bad_qid'] = True
+ self.conn.response_generator = self._create_soa_response_data
+ # xfrin should check TSIG before other part of incoming message
+ # validate log message for XfrinException
+ self.__match_exception(XfrinException,
+ "TSIG verify fail: BADSIG",
+ self.conn._check_soa_serial)
+
def test_soacheck_non_response(self):
self.soa_response_params['response'] = False
self.conn.response_generator = self._create_soa_response_data
@@ -264,6 +398,54 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ def test_soacheck_with_tsig(self):
+ # Use a mock tsig context emulating a validly signed response
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertEqual(self.conn._check_soa_serial(), XFRIN_OK)
+ self.assertEqual(self.conn._tsig_ctx.get_error(), TSIGError.NOERROR)
+
+ def test_soacheck_with_tsig_notauth(self):
+ # emulate a valid error response
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.soa_response_params['rcode'] = Rcode.NOTAUTH()
+ self.conn.response_generator = self._create_soa_response_data
+
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
+ def test_soacheck_with_tsig_noerror_badsig(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+
+ # emulate a normal response bad verification failure due to BADSIG.
+    # According to RFC 2845, in this case we should ignore it and keep
+ # waiting for a valid response until a timeout. But we immediately
+ # treat this as a final failure (just as BIND 9 does).
+ self.conn.response_generator = self._create_soa_response_data
+
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
+ def test_soacheck_with_tsig_unsigned_response(self):
+ # we can use a real TSIGContext for this. the response doesn't
+ # contain a TSIG while we sent a signed query. RFC2845 states
+ # we should wait for a valid response in this case, but we treat
+ # it as a fatal transaction failure, too.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
+ def test_soacheck_with_unexpected_tsig_response(self):
+ # we reject unexpected TSIG in responses (following BIND 9's
+ # behavior)
+ self.soa_response_params['tsig'] = True
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
def test_response_shutdown(self):
self.conn.response_generator = self._create_normal_response_data
self.conn._shutdown_event.set()
@@ -297,6 +479,88 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+ def test_do_xfrin_with_tsig(self):
+ # use TSIG with a mock context. we fake all verify results to
+ # emulate successful verification.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+ # We use two messages in the tests. The same context should have been
+    # used for both.
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_tsig_fail(self):
+ # TSIG verify will fail for the first message. xfrin should fail
+ # immediately.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(1, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_tsig_fail_for_second_message(self):
+ # Similar to the previous test, but first verify succeeds. There
+ # should be a second verify attempt, which will fail, which should
+ # make xfrin fail.
+ def fake_tsig_error(ctx):
+ if self.conn._tsig_ctx.verify_called == 1:
+ return TSIGError.NOERROR
+ return TSIGError.BAD_SIG
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, fake_tsig_error)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_missing_tsig(self):
+ # XFR request sent with TSIG, but the response doesn't have TSIG.
+ # xfr should fail.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, None)
+ self.conn._tsig_ctx = MockTSIGContext(TSIG_KEY)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(1, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_missing_tsig_for_second_message(self):
+    # Similar to the previous test, but the first one contains TSIG and verify
+ # succeeds (due to fake). The second message lacks TSIG.
+ #
+ # Note: this test case is actually not that trivial: Skipping
+ # intermediate TSIG is allowed. In this case, however, the second
+ # message is the last one, which must contain TSIG anyway, so the
+ # expected result is correct. If/when we support skipping
+ # intermediate TSIGs, we'll need additional test cases.
+ def fake_tsig_error(ctx):
+ if self.conn._tsig_ctx.verify_called == 1:
+ return TSIGError.NOERROR
+ return TSIGError.FORMERR
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, fake_tsig_error)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_unexpected_tsig(self):
+ # XFR request wasn't signed, but response includes TSIG. Like BIND 9,
+ # we reject that.
+ self.axfr_response_params['tsig_1st'] = TSIGContext(TSIG_KEY)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+
+ def test_do_xfrin_with_unexpected_tsig_for_second_message(self):
+ # similar to the previous test, but the first message is normal.
+ # the second one contains an unexpected TSIG. should be rejected.
+ self.axfr_response_params['tsig_2nd'] = TSIGContext(TSIG_KEY)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+
def test_do_xfrin_empty_response(self):
# skipping the creation of response data, so the transfer will fail.
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
@@ -315,6 +579,23 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+ def test_do_soacheck_and_xfrin_with_tsig(self):
+ # We are going to have a SOA query/response transaction, followed by
+ # AXFR, all TSIG signed. xfrin should use a new TSIG context for
+ # AXFR. We are not interested in whether verify works correctly in
+ # this test, so we simply fake the results (they need to succeed for
+ # this test)
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
+ self.soa_response_params['tsig'] = True
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+ # We should've got 3 response messages: 1 SOA and two AXFR, but
+ # the context should be replaced for AXFR, so verify() should be
+ # called only twice for the latest context.
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
def test_do_soacheck_broken_response(self):
self.conn.response_generator = self._create_broken_response_data
# XXX: TODO: this test failed here, should xfr not raise an
@@ -342,21 +623,39 @@ class TestXfrinConnection(unittest.TestCase):
# This helper method creates a simple sequence of DNS messages that
# forms a valid XFR transaction. It consists of two messages, each
# containing just a single SOA RR.
- self.conn.reply_data = self.conn.create_response_data()
- self.conn.reply_data += self.conn.create_response_data()
+ tsig_1st = self.axfr_response_params['tsig_1st']
+ tsig_2nd = self.axfr_response_params['tsig_2nd']
+ self.conn.reply_data = self.conn.create_response_data(tsig_ctx=tsig_1st)
+ self.conn.reply_data += \
+ self.conn.create_response_data(tsig_ctx=tsig_2nd)
def _create_soa_response_data(self):
# This helper method creates a DNS message that is supposed to be
# used a valid response to SOA queries prior to XFR.
+ # If tsig is True, it tries to verify the query with a locally
+ # created TSIG context (which may or may not succeed) so that the
+ # response will include a TSIG.
# If axfr_after_soa is True, it resets the response_generator so that
# a valid XFR messages will follow.
+
+ verify_ctx = None
+ if self.soa_response_params['tsig']:
+    # xfrin (currently) always uses TCP.  strip off the length field.
+ query_data = self.conn.query_data[2:]
+ query_message = Message(Message.PARSE)
+ query_message.from_wire(query_data)
+ verify_ctx = TSIGContext(TSIG_KEY)
+ verify_ctx.verify(query_message.get_tsig_record(), query_data)
+
self.conn.reply_data = self.conn.create_response_data(
bad_qid=self.soa_response_params['bad_qid'],
response=self.soa_response_params['response'],
rcode=self.soa_response_params['rcode'],
- questions=self.soa_response_params['questions'])
+ questions=self.soa_response_params['questions'],
+ tsig_ctx=verify_ctx)
if self.soa_response_params['axfr_after_soa'] != None:
- self.conn.response_generator = self.soa_response_params['axfr_after_soa']
+ self.conn.response_generator = \
+ self.soa_response_params['axfr_after_soa']
def _create_broken_response_data(self):
# This helper method creates a bogus "DNS message" that only contains
@@ -399,21 +698,28 @@ class TestXfrinRecorder(unittest.TestCase):
class TestXfrin(unittest.TestCase):
def setUp(self):
+ # redirect output
+ self.stderr_backup = sys.stderr
+ sys.stderr = open(os.devnull, 'w')
self.xfr = MockXfrin()
self.args = {}
- self.args['zone_name'] = TEST_ZONE_NAME
+ self.args['zone_name'] = TEST_ZONE_NAME_STR
+ self.args['class'] = TEST_RRCLASS_STR
self.args['port'] = TEST_MASTER_PORT
self.args['master'] = TEST_MASTER_IPV4_ADDRESS
self.args['db_file'] = TEST_DB_FILE
+ self.args['tsig_key'] = ''
def tearDown(self):
self.xfr.shutdown()
+ sys.stderr= self.stderr_backup
def _do_parse_zone_name_class(self):
return self.xfr._parse_zone_name_and_class(self.args)
def _do_parse_master_port(self):
- return self.xfr._parse_master_and_port(self.args)
+ name, rrclass = self._do_parse_zone_name_class()
+ return self.xfr._parse_master_and_port(self.args, name, rrclass)
def test_parse_cmd_params(self):
name, rrclass = self._do_parse_zone_name_class()
@@ -441,7 +747,7 @@ class TestXfrin(unittest.TestCase):
def test_parse_cmd_params_bogusclass(self):
self.args['zone_class'] = 'XXX'
- self.assertRaises(XfrinException, self._do_parse_zone_name_class)
+ self.assertRaises(XfrinZoneInfoException, self._do_parse_zone_name_class)
def test_parse_cmd_params_nozone(self):
# zone name is mandatory.
@@ -451,8 +757,7 @@ class TestXfrin(unittest.TestCase):
def test_parse_cmd_params_nomaster(self):
# master address is mandatory.
del self.args['master']
- master_addrinfo = self._do_parse_master_port()
- self.assertEqual(master_addrinfo[2][0], DEFAULT_MASTER)
+ self.assertRaises(XfrinException, self._do_parse_master_port)
def test_parse_cmd_params_bad_ip4(self):
self.args['master'] = '3.3.3.3.3'
@@ -482,6 +787,77 @@ class TestXfrin(unittest.TestCase):
def test_command_handler_retransfer(self):
self.assertEqual(self.xfr.command_handler("retransfer",
self.args)['result'][0], 0)
+ self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
+
+ def test_command_handler_retransfer_short_command1(self):
+ # try it when only specifying the zone name (of unknown zone)
+ # this should fail because master address is not specified.
+ short_args = {}
+ short_args['zone_name'] = TEST_ZONE_NAME_STR
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 1)
+
+ def test_command_handler_retransfer_short_command2(self):
+ # try it when only specifying the zone name (of known zone)
+ short_args = {}
+ short_args['zone_name'] = TEST_ZONE_NAME_STR
+
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
+
+ def test_command_handler_retransfer_short_command3(self):
+ # try it when only specifying the zone name (of known zone)
+ short_args = {}
+ # test it without the trailing root dot
+ short_args['zone_name'] = TEST_ZONE_NAME_STR[:-1]
+
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
+
+ def test_command_handler_retransfer_short_command4(self):
+ # try it when only specifying the zone name (of known zone, with
+ # different case)
+ short_args = {}
+
+ # swap the case of the zone name in our command
+ short_args['zone_name'] = TEST_ZONE_NAME_STR.swapcase()
+
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
def test_command_handler_retransfer_badcommand(self):
self.args['master'] = 'invalid'
@@ -489,13 +865,15 @@ class TestXfrin(unittest.TestCase):
self.args)['result'][0], 1)
def test_command_handler_retransfer_quota(self):
+ self.args['master'] = TEST_MASTER_IPV4_ADDRESS
+
for i in range(self.xfr._max_transfers_in - 1):
- self.xfr.recorder.increment(str(i) + TEST_ZONE_NAME)
+ self.xfr.recorder.increment(Name(str(i) + TEST_ZONE_NAME_STR))
# there can be one more outstanding transfer.
self.assertEqual(self.xfr.command_handler("retransfer",
self.args)['result'][0], 0)
# make sure the # xfrs would excceed the quota
- self.xfr.recorder.increment(str(self.xfr._max_transfers_in) + TEST_ZONE_NAME)
+ self.xfr.recorder.increment(Name(str(self.xfr._max_transfers_in) + TEST_ZONE_NAME_STR))
# this one should fail
self.assertEqual(self.xfr.command_handler("retransfer",
self.args)['result'][0], 1)
@@ -519,14 +897,43 @@ class TestXfrin(unittest.TestCase):
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
self.assertEqual(self.xfr.command_handler("refresh",
self.args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV6_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
def test_command_handler_notify(self):
# at this level, refresh is no different than retransfer.
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
- # ...but right now we disable the feature due to security concerns.
+ # ...but the zone is unknown so this would return an error
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+ def test_command_handler_notify_known_zone(self):
+ # try it with a known zone
+ self.args['master'] = TEST_MASTER_IPV6_ADDRESS
+
+ # but use a different address in the actual command
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 0)
+ # and see if we used the address from the command, and not from
+ # the config
+ # This is actually NOT the address given in the command, which
+ # would at this point not make sense, see the TODO in
+ # xfrin.py.in Xfrin.command_handler())
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
+
def test_command_handler_unknown(self):
self.assertEqual(self.xfr.command_handler("xxx", None)['result'][0], 1)
@@ -535,20 +942,153 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.config_handler({'transfers_in': 3})['result'][0], 0)
self.assertEqual(self.xfr._max_transfers_in, 3)
- def test_command_handler_masters(self):
- master_info = {'master_addr': '1.1.1.1', 'master_port':53}
- self.assertEqual(self.xfr.config_handler(master_info)['result'][0], 0)
-
- master_info = {'master_addr': '1111.1.1.1', 'master_port':53 }
- self.assertEqual(self.xfr.config_handler(master_info)['result'][0], 1)
-
- master_info = {'master_addr': '2.2.2.2', 'master_port':530000 }
- self.assertEqual(self.xfr.config_handler(master_info)['result'][0], 1)
-
- master_info = {'master_addr': '2.2.2.2', 'master_port':53 }
- self.xfr.config_handler(master_info)
- self.assertEqual(self.xfr._master_addr, '2.2.2.2')
- self.assertEqual(self.xfr._master_port, 53)
+ def _check_zones_config(self, config_given):
+ if 'transfers_in' in config_given:
+ self.assertEqual(config_given['transfers_in'],
+ self.xfr._max_transfers_in)
+ for zone_config in config_given['zones']:
+ zone_name = zone_config['name']
+ zone_info = self.xfr._get_zone_info(Name(zone_name), RRClass.IN())
+ self.assertEqual(str(zone_info.master_addr), zone_config['master_addr'])
+ self.assertEqual(zone_info.master_port, zone_config['master_port'])
+ if 'tsig_key' in zone_config:
+ self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
+ else:
+ self.assertIsNone(zone_info.tsig_key)
+ if 'ixfr_disabled' in zone_config and\
+ zone_config.get('ixfr_disabled'):
+ self.assertTrue(zone_info.ixfr_disabled)
+ else:
+ # if not set, should default to False
+ self.assertFalse(zone_info.ixfr_disabled)
+
+ def test_command_handler_zones(self):
+ config1 = { 'transfers_in': 3,
+ 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.1',
+ 'master_port': 53,
+ 'ixfr_disabled': False
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
+ self._check_zones_config(config1)
+
+ config2 = { 'transfers_in': 4,
+ 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.2',
+ 'master_port': 53,
+ 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
+ 'ixfr_disabled': True
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
+ self._check_zones_config(config2)
+
+ # test that configuring the zone multiple times fails
+ zones = { 'transfers_in': 5,
+ 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.1',
+ 'master_port': 53
+ },
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.2',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.3',
+ 'master_port': 53,
+ 'class': 'BADCLASS'
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'master_addr': '192.0.2.4',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'bad..zone.',
+ 'master_addr': '192.0.2.5',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': '',
+ 'master_addr': '192.0.2.6',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example',
+ 'master_addr': 'badaddress',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example',
+ 'master_addr': '192.0.2.7',
+ 'master_port': 'bad_port'
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example',
+ 'master_addr': '192.0.2.7',
+ 'master_port': 53,
+ # using a bad TSIG key spec
+ 'tsig_key': "bad..example.com:SFuWd/q99SzF8Yzd1QbB9g=="
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ # let's also add a zone that is correct too, and make sure
+ # that the new config is not partially taken
+ zones = { 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.8',
+ 'master_port': 53
+ },
+ { 'name': 'test2.example.',
+ 'master_addr': '192.0.2.9',
+ 'master_port': 53,
+ 'tsig_key': 'badkey'
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
def raise_interrupt():
@@ -584,6 +1124,7 @@ class TestMain(unittest.TestCase):
if __name__== "__main__":
try:
+ isc.log.resetUnitTestRootLogger()
unittest.main()
except KeyboardInterrupt as e:
print(e)
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 10a866e..a77a383 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -1,6 +1,6 @@
#!@PYTHON@
-# Copyright (C) 2010 Internet Systems Consortium.
+# Copyright (C) 2009-2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -29,12 +29,17 @@ from isc.config.ccsession import *
from isc.notify import notify_out
import isc.util.process
import isc.net.parse
+from isc.log_messages.xfrin_messages import *
+
+isc.log.init("b10-xfrin")
+logger = isc.log.Logger("xfrin")
+
try:
from pydnspp import *
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrin process
# must keep running, so we warn about it and move forward.
- sys.stderr.write('[b10-xfrin] failed to import DNS module: %s\n' % str(e))
+ logger.error(XFRIN_IMPORT_DNS, str(e))
isc.util.process.rename()
@@ -56,26 +61,64 @@ XFROUT_MODULE_NAME = 'Xfrout'
ZONE_MANAGER_MODULE_NAME = 'Zonemgr'
REFRESH_FROM_ZONEMGR = 'refresh_from_zonemgr'
ZONE_XFRIN_FAILED = 'zone_xfrin_failed'
+
+# These two defaults are currently hard-coded. For config this isn't
+# necessary, but we need these defaults for optional command arguments
+# (TODO: have similar support to get default values for command
+# arguments as we do for config options)
+DEFAULT_MASTER_PORT = 53
+DEFAULT_ZONE_CLASS = RRClass.IN()
+
__version__ = 'BIND10'
# define xfrin rcode
XFRIN_OK = 0
XFRIN_FAIL = 1
-DEFAULT_MASTER_PORT = '53'
-DEFAULT_MASTER = '127.0.0.1'
-
-def log_error(msg):
- sys.stderr.write("[b10-xfrin] %s\n" % str(msg))
+class XfrinException(Exception):
+ pass
-class XfrinException(Exception):
+class XfrinZoneInfoException(Exception):
+ """This exception is raised if there is an error in the given
+ configuration (part), or when a command does not have a required
+ argument or has bad arguments, for instance when the zone's master
+ address is not a valid IP address, when the zone does not
+ have a name, or when multiple settings are given for the same
+ zone."""
pass
+def _check_zone_name(zone_name_str):
+ """Checks if the given zone name is a valid domain name, and returns
+ it as a Name object. Raises an XfrinException if it is not."""
+ try:
+ # In the _zones dict, part of the key is the zone name,
+ # but due to a limitation in the Name class, we
+ # cannot directly use it as a dict key, and we use to_text()
+ #
+ # Downcase the name here for that reason.
+ return Name(zone_name_str, True)
+ except (EmptyLabel, TooLongLabel, BadLabelType, BadEscape,
+ TooLongName, IncompleteName) as ne:
+ raise XfrinZoneInfoException("bad zone name: " + zone_name_str + " (" + str(ne) + ")")
+
+def _check_zone_class(zone_class_str):
+ """If the given argument is a string: checks if the given class is
+ a valid one, and returns an RRClass object if so.
+ Raises XfrinZoneInfoException if not.
+ If it is None, this function returns the default RRClass.IN()"""
+ if zone_class_str is None:
+ return DEFAULT_ZONE_CLASS
+ try:
+ return RRClass(zone_class_str)
+ except InvalidRRClass as irce:
+ raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
+
class XfrinConnection(asyncore.dispatcher):
- '''Do xfrin in this class. '''
+ '''Do xfrin in this class. '''
def __init__(self,
sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addrinfo, verbose = False, idle_timeout = 60):
+ master_addrinfo, tsig_key = None, verbose = False,
+ idle_timeout = 60):
''' idle_timeout: max idle time for read data from socket.
db_file: specify the data source file.
check_soa: when it's true, check soa first before sending xfr query
@@ -93,6 +136,14 @@ class XfrinConnection(asyncore.dispatcher):
self._shutdown_event = shutdown_event
self._verbose = verbose
self._master_address = master_addrinfo[2]
+ self._tsig_key = tsig_key
+ self._tsig_ctx = None
+ # tsig_ctx_creator is introduced to allow tests to use a mock class for
+ # easier tests (in normal case we always use the default)
+ self._tsig_ctx_creator = self.__create_tsig_ctx
+
+ def __create_tsig_ctx(self, key):
+ return TSIGContext(key)
def connect_to_master(self):
'''Connect to master in TCP.'''
@@ -101,8 +152,7 @@ class XfrinConnection(asyncore.dispatcher):
self.connect(self._master_address)
return True
except socket.error as e:
- self.log_msg('Failed to connect:(%s), %s' % (self._master_address,
- str(e)))
+ logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
return False
def _create_query(self, query_type):
@@ -130,9 +180,15 @@ class XfrinConnection(asyncore.dispatcher):
msg = self._create_query(query_type)
render = MessageRenderer()
- msg.to_wire(render)
- header_len = struct.pack('H', socket.htons(render.get_length()))
+ # XXX Currently, python wrapper doesn't accept 'None' parameter in this case,
+ # we should remove the if statement and use a universal interface later.
+ if self._tsig_key is not None:
+ self._tsig_ctx = self._tsig_ctx_creator(self._tsig_key)
+ msg.to_wire(render, self._tsig_ctx)
+ else:
+ msg.to_wire(render)
+ header_len = struct.pack('H', socket.htons(render.get_length()))
self._send_data(header_len)
self._send_data(render.get_data())
@@ -142,7 +198,7 @@ class XfrinConnection(asyncore.dispatcher):
_get_request_response so that we can test the rest of the code without
involving actual communication with a remote server.'''
asyncore.loop(self._idle_timeout, map=self._sock_map, count=1)
-
+
def _get_request_response(self, size):
recv_size = 0
data = b''
@@ -158,6 +214,22 @@ class XfrinConnection(asyncore.dispatcher):
return data
+ def _check_response_tsig(self, msg, response_data):
+ tsig_record = msg.get_tsig_record()
+ if self._tsig_ctx is not None:
+ tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
+ if tsig_error != TSIGError.NOERROR:
+ raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+ elif tsig_record is not None:
+ # If the response includes a TSIG while we didn't sign the query,
+ # we treat it as an error. RFC doesn't say anything about this
+ # case, but it clearly states the server must not sign a response
+ # to an unsigned request. Although we could be flexible, no sane
+ # implementation would return such a response, and since this is
+ # part of security mechanism, it's probably better to be more
+ # strict.
+ raise XfrinException('Unexpected TSIG in response')
+
def _check_soa_serial(self):
''' Compare the soa serial, if soa serial in master is less than
the soa serial in local, Finish xfrin.
@@ -165,18 +237,21 @@ class XfrinConnection(asyncore.dispatcher):
True: soa serial in master is bigger
'''
- self._send_query(RRType("SOA"))
+ self._send_query(RRType.SOA())
data_len = self._get_request_response(2)
msg_len = socket.htons(struct.unpack('H', data_len)[0])
soa_response = self._get_request_response(msg_len)
msg = Message(Message.PARSE)
msg.from_wire(soa_response)
+ # TSIG related checks, including an unexpected signed response
+ self._check_response_tsig(msg, soa_response)
+
# perform some minimal level validation. It's an open issue how
# strict we should be (see the comment in _check_response_header())
self._check_response_header(msg)
- # TODO, need select soa record from data source then compare the two
+ # TODO, need select soa record from data source then compare the two
# serial, current just return OK, since this function hasn't been used
# now.
return XFRIN_OK
@@ -190,32 +265,27 @@ class XfrinConnection(asyncore.dispatcher):
logstr = 'SOA check for \'%s\' ' % self._zone_name
ret = self._check_soa_serial()
- logstr = 'transfer of \'%s\': AXFR ' % self._zone_name
if ret == XFRIN_OK:
- self.log_msg(logstr + 'started')
- # TODO: .AXFR() RRType.AXFR()
- self._send_query(RRType(252))
+ logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
+ self._send_query(RRType.AXFR())
isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
self._handle_xfrin_response)
- self.log_msg(logstr + 'succeeded')
+ logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
except XfrinException as e:
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
ret = XFRIN_FAIL
#TODO, recover data source.
except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
ret = XFRIN_FAIL
except UserWarning as e:
# XXX: this is an exception from our C++ library via the
# Boost.Python binding. It would be better to have more more
# specific exceptions, but at this moment this is the finest
# granularity.
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
ret = XFRIN_FAIL
finally:
self.close()
@@ -238,7 +308,7 @@ class XfrinConnection(asyncore.dispatcher):
raise XfrinException('error response: %s' % msg_rcode.to_text())
if not msg.get_header_flag(Message.HEADERFLAG_QR):
- raise XfrinException('response is not a response ')
+ raise XfrinException('response is not a response')
if msg.get_qid() != self._query_id:
raise XfrinException('bad query id')
@@ -265,7 +335,7 @@ class XfrinConnection(asyncore.dispatcher):
for rdata in rrset.get_rdata():
# Count the soa record count
- if rrset.get_type() == RRType("SOA"):
+ if rrset.get_type() == RRType.SOA():
self._soa_rr_count += 1
# XXX: the current DNS message parser can't preserve the
@@ -289,15 +359,20 @@ class XfrinConnection(asyncore.dispatcher):
recvdata = self._get_request_response(msg_len)
msg = Message(Message.PARSE)
msg.from_wire(recvdata)
+
+ # TSIG related checks, including an unexpected signed response
+ self._check_response_tsig(msg, recvdata)
+
+ # Perform response status validation
self._check_response_status(msg)
-
+
answer_section = msg.get_section(Message.SECTION_ANSWER)
for rr in self._handle_answer_section(answer_section):
yield rr
if self._soa_rr_count == 2:
break
-
+
if self._shutdown_event.is_set():
raise XfrinException('xfrin is forced to stop')
@@ -317,21 +392,18 @@ class XfrinConnection(asyncore.dispatcher):
# Overwrite the log function, log nothing
pass
- def log_msg(self, msg):
- if self._verbose:
- sys.stdout.write('[b10-xfrin] %s\n' % str(msg))
-
-
-def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo, check_soa, verbose):
+def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
+ shutdown_event, master_addrinfo, check_soa, verbose,
+ tsig_key):
xfrin_recorder.increment(zone_name)
sock_map = {}
conn = XfrinConnection(sock_map, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo, verbose)
+ shutdown_event, master_addrinfo,
+ tsig_key, verbose)
ret = XFRIN_FAIL
if conn.connect_to_master():
ret = conn.do_xfrin(check_soa)
-
+
# Publish the zone transfer result news, so zonemgr can reset the
# zone timer, and xfrout can notify the zone's slaves if the result
# is success.
@@ -367,23 +439,122 @@ class XfrinRecorder:
self._lock.release()
return ret
+class ZoneInfo:
+ def __init__(self, config_data, module_cc):
+ """Creates a zone_info with the config data element as
+ specified by the 'zones' list in xfrin.spec. Module_cc is
+ needed to get the defaults from the specification"""
+ self._module_cc = module_cc
+ self.set_name(config_data.get('name'))
+ self.set_master_addr(config_data.get('master_addr'))
+
+ self.set_master_port(config_data.get('master_port'))
+ self.set_zone_class(config_data.get('class'))
+ self.set_tsig_key(config_data.get('tsig_key'))
+ self.set_ixfr_disabled(config_data.get('ixfr_disabled'))
+
+ def set_name(self, name_str):
+ """Set the name for this zone given a name string.
+ Raises XfrinZoneInfoException if name_str is None or if it
+ cannot be parsed."""
+ if name_str is None:
+ raise XfrinZoneInfoException("Configuration zones list "
+ "element does not contain "
+ "'name' attribute")
+ else:
+ self.name = _check_zone_name(name_str)
+
+ def set_master_addr(self, master_addr_str):
+ """Set the master address for this zone given an IP address
+ string. Raises XfrinZoneInfoException if master_addr_str is
+ None or if it cannot be parsed."""
+ if master_addr_str is None:
+ raise XfrinZoneInfoException("master address missing from config data")
+ else:
+ try:
+ self.master_addr = isc.net.parse.addr_parse(master_addr_str)
+ except ValueError:
+ logger.error(XFRIN_BAD_MASTER_ADDR_FORMAT, master_addr_str)
+ errmsg = "bad format for zone's master: " + master_addr_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_master_port(self, master_port_str):
+ """Set the master port given a port number string. If
+ master_port_str is None, the default from the specification
+ for this module will be used. Raises XfrinZoneInfoException if
+ the string contains an invalid port number"""
+ if master_port_str is None:
+ self.master_port = self._module_cc.get_default_value("zones/master_port")
+ else:
+ try:
+ self.master_port = isc.net.parse.port_parse(master_port_str)
+ except ValueError:
+ logger.error(XFRIN_BAD_MASTER_PORT_FORMAT, master_port_str)
+ errmsg = "bad format for zone's master port: " + master_port_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_zone_class(self, zone_class_str):
+ """Set the zone class given an RR class str (e.g. "IN"). If
+ zone_class_str is None, it will default to what is specified
+ in the specification file for this module. Raises
+ XfrinZoneInfoException if the string cannot be parsed."""
+ # TODO: remove _str
+ self.class_str = zone_class_str or self._module_cc.get_default_value("zones/class")
+ if zone_class_str == None:
+ #TODO rrclass->zone_class
+ self.rrclass = RRClass(self._module_cc.get_default_value("zones/class"))
+ else:
+ try:
+ self.rrclass = RRClass(zone_class_str)
+ except InvalidRRClass:
+ logger.error(XFRIN_BAD_ZONE_CLASS, zone_class_str)
+ errmsg = "invalid zone class: " + zone_class_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_tsig_key(self, tsig_key_str):
+ """Set the tsig_key for this zone, given a TSIG key string
+ representation. If tsig_key_str is None, no TSIG key will
+ be set. Raises XfrinZoneInfoException if tsig_key_str cannot
+ be parsed."""
+ if tsig_key_str is None:
+ self.tsig_key = None
+ else:
+ try:
+ self.tsig_key = TSIGKey(tsig_key_str)
+ except InvalidParameter as ipe:
+ logger.error(XFRIN_BAD_TSIG_KEY_STRING, tsig_key_str)
+ errmsg = "bad TSIG key string: " + tsig_key_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_ixfr_disabled(self, ixfr_disabled):
+ """Set ixfr_disabled. If set to False (the default), it will use
+ IXFR for incoming transfers. If set to True, it will use AXFR.
+ At this moment there is no automatic fallback"""
+ # don't care what type it is; if evaluates to true, set to True
+ if ixfr_disabled:
+ self.ixfr_disabled = True
+ else:
+ self.ixfr_disabled = False
+
+ def get_master_addr_info(self):
+ return (self.master_addr.family, socket.SOCK_STREAM,
+ (str(self.master_addr), self.master_port))
+
class Xfrin:
def __init__(self, verbose = False):
self._max_transfers_in = 10
- #TODO, this is the temp way to set the zone's master.
- self._master_addr = DEFAULT_MASTER
- self._master_port = DEFAULT_MASTER_PORT
+ self._zones = {}
self._cc_setup()
self.recorder = XfrinRecorder()
self._shutdown_event = threading.Event()
self._verbose = verbose
def _cc_setup(self):
- '''This method is used only as part of initialization, but is
- implemented separately for convenience of unit tests; by letting
- the test code override this method we can test most of this class
+ '''This method is used only as part of initialization, but is
+ implemented separately for convenience of unit tests; by letting
+ the test code override this method we can test most of this class
without requiring a command channel.'''
- # Create one session for sending command to other modules, because the
+ # Create one session for sending command to other modules, because the
# listening session will block the send operation.
self._send_cc_session = isc.cc.Session()
self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
@@ -391,36 +562,55 @@ class Xfrin:
self.command_handler)
self._module_cc.start()
config_data = self._module_cc.get_full_config()
- self._max_transfers_in = config_data.get("transfers_in")
- self._master_addr = config_data.get('master_addr') or self._master_addr
- self._master_port = config_data.get('master_port') or self._master_port
+ self.config_handler(config_data)
def _cc_check_command(self):
- '''This is a straightforward wrapper for cc.check_command,
- but provided as a separate method for the convenience
+ '''This is a straightforward wrapper for cc.check_command,
+ but provided as a separate method for the convenience
of unit tests.'''
self._module_cc.check_command(False)
+ def _get_zone_info(self, name, rrclass):
+ """Returns the ZoneInfo object containing the configured data
+ for the given zone name. If the zone name did not have any
+ data, returns None"""
+ return self._zones.get((name.to_text(), rrclass.to_text()))
+
+ def _add_zone_info(self, zone_info):
+ """Add the zone info. Raises a XfrinZoneInfoException if a zone
+ with the same name and class is already configured"""
+ key = (zone_info.name.to_text(), zone_info.class_str)
+ if key in self._zones:
+ raise XfrinZoneInfoException("zone " + str(key) +
+ " configured multiple times")
+ self._zones[key] = zone_info
+
+ def _clear_zone_info(self):
+ self._zones = {}
+
def config_handler(self, new_config):
+ # backup all config data (should there be a problem in the new
+ # data)
+ old_max_transfers_in = self._max_transfers_in
+ old_zones = self._zones
+
self._max_transfers_in = new_config.get("transfers_in") or self._max_transfers_in
- if ('master_addr' in new_config) or ('master_port' in new_config):
- # User should change the port and address together.
- try:
- addr = new_config.get('master_addr') or self._master_addr
- port = new_config.get('master_port') or self._master_port
- isc.net.parse.addr_parse(addr)
- isc.net.parse.port_parse(port)
- self._master_addr = addr
- self._master_port = port
- except ValueError:
- errmsg = "bad format for zone's master: " + str(new_config)
- log_error(errmsg)
- return create_answer(1, errmsg)
+
+ if 'zones' in new_config:
+ self._clear_zone_info()
+ for zone_config in new_config.get('zones'):
+ try:
+ zone_info = ZoneInfo(zone_config, self._module_cc)
+ self._add_zone_info(zone_info)
+ except XfrinZoneInfoException as xce:
+ self._zones = old_zones
+ self._max_transfers_in = old_max_transfers_in
+ return create_answer(1, str(xce))
return create_answer(0)
def shutdown(self):
- ''' shutdown the xfrin process. the thread which is doing xfrin should be
+ ''' shutdown the xfrin process. the thread which is doing xfrin should be
terminated.
'''
self._shutdown_event.set()
@@ -436,61 +626,104 @@ class Xfrin:
if command == 'shutdown':
self._shutdown_event.set()
elif command == 'notify' or command == REFRESH_FROM_ZONEMGR:
- # Xfrin receives the refresh/notify command from zone manager.
- # notify command maybe has the parameters which
+ # Xfrin receives the refresh/notify command from zone manager.
+ # notify command maybe has the parameters which
# specify the notifyfrom address and port, according the RFC1996, zone
# transfer should starts first from the notifyfrom, but now, let 'TODO' it.
+ # (using the value now, while we can only set one master address, would be
+ # a security hole. Once we add the ability to have multiple master addresses,
+ # we should check if it matches one of them, and then use it.)
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
- (master_addr) = build_addr_info(self._master_addr, self._master_port)
- ret = self.xfrin_start(zone_name,
- rrclass,
- self._get_db_file(),
- master_addr,
- True)
- answer = create_answer(ret[0], ret[1])
+ zone_info = self._get_zone_info(zone_name, rrclass)
+ if zone_info is None:
+ # TODO what to do? no info known about zone. defaults?
+ errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
+ logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
+ answer = create_answer(1, errmsg)
+ else:
+ master_addr = zone_info.get_master_addr_info()
+ ret = self.xfrin_start(zone_name,
+ rrclass,
+ self._get_db_file(),
+ master_addr,
+ zone_info.tsig_key,
+ True)
+ answer = create_answer(ret[0], ret[1])
elif command == 'retransfer' or command == 'refresh':
# Xfrin receives the retransfer/refresh from cmdctl(sent by bindctl).
- # If the command has specified master address, do transfer from the
- # master address, or else do transfer from the configured masters.
+ # If the command has specified master address, do transfer from the
+ # master address, or else do transfer from the configured masters.
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
- master_addr = self._parse_master_and_port(args)
+ master_addr = self._parse_master_and_port(args, zone_name,
+ rrclass)
+ zone_info = self._get_zone_info(zone_name, rrclass)
+ tsig_key = None
+ if zone_info:
+ tsig_key = zone_info.tsig_key
db_file = args.get('db_file') or self._get_db_file()
- ret = self.xfrin_start(zone_name,
- rrclass,
- db_file,
+ ret = self.xfrin_start(zone_name,
+ rrclass,
+ db_file,
master_addr,
+ tsig_key,
(False if command == 'retransfer' else True))
answer = create_answer(ret[0], ret[1])
else:
answer = create_answer(1, 'unknown command: ' + command)
except XfrinException as err:
- log_error('error happened for command: %s, %s' % (command, str(err)) )
+ logger.error(XFRIN_COMMAND_ERROR, command, str(err))
answer = create_answer(1, str(err))
return answer
def _parse_zone_name_and_class(self, args):
- zone_name = args.get('zone_name')
- if not zone_name:
+ zone_name_str = args.get('zone_name')
+ if zone_name_str is None:
raise XfrinException('zone name should be provided')
- rrclass = args.get('zone_class')
- if not rrclass:
- rrclass = RRClass.IN()
+ return (_check_zone_name(zone_name_str), _check_zone_class(args.get('zone_class')))
+
+ def _parse_master_and_port(self, args, zone_name, zone_class):
+ """
+ Return tuple (family, socktype, sockaddr) for address and port in given
+ args dict.
+ IPv4 and IPv6 are the only supported addresses now, so sockaddr will be
+ (address, port). The socktype is socket.SOCK_STREAM for now.
+ """
+ # check if we have configured info about this zone, in case
+ # port or master are not specified
+ zone_info = self._get_zone_info(zone_name, zone_class)
+
+ addr_str = args.get('master')
+ if addr_str is None:
+ if zone_info is not None:
+ addr = zone_info.master_addr
+ else:
+ raise XfrinException("Master address not given or "
+ "configured for " + zone_name.to_text())
+ else:
+ try:
+ addr = isc.net.parse.addr_parse(addr_str)
+ except ValueError as err:
+ raise XfrinException("failed to resolve master address %s: %s" %
+ (addr_str, str(err)))
+
+ port_str = args.get('port')
+ if port_str is None:
+ if zone_info is not None:
+ port = zone_info.master_port
+ else:
+ port = DEFAULT_MASTER_PORT
else:
try:
- rrclass = RRClass(rrclass)
- except InvalidRRClass as e:
- raise XfrinException('invalid RRClass: ' + rrclass)
-
- return zone_name, rrclass
-
- def _parse_master_and_port(self, args):
- port = args.get('port') or self._master_port
- master = args.get('master') or self._master_addr
- return build_addr_info(master, port)
-
+ port = isc.net.parse.port_parse(port_str)
+ except ValueError as err:
+ raise XfrinException("failed to parse port=%s: %s" %
+ (port_str, str(err)))
+
+ return (addr.family, socket.SOCK_STREAM, (str(addr), port))
+
def _get_db_file(self):
#TODO, the db file path should be got in auth server's configuration
# if we need access to this configuration more often, we
@@ -506,12 +739,12 @@ class Xfrin:
db_file = os.environ["B10_FROM_BUILD"] + os.sep + "bind10_zones.sqlite3"
self._module_cc.remove_remote_config(AUTH_SPECFILE_LOCATION)
return db_file
-
+
def publish_xfrin_news(self, zone_name, zone_class, xfr_result):
'''Send command to xfrout/zone manager module.
- If xfrin has finished successfully for one zone, tell the good
+ If xfrin has finished successfully for one zone, tell the good
news(command: zone_new_data_ready) to zone manager and xfrout.
- if xfrin failed, just tell the bad news to zone manager, so that
+ if xfrin failed, just tell the bad news to zone manager, so that
it can reset the refresh timer for that zone. '''
param = {'zone_name': zone_name, 'zone_class': zone_class.to_text()}
if xfr_result == XFRIN_OK:
@@ -531,9 +764,8 @@ class Xfrin:
seq)
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
- except socket.error as err:
- log_error("Fail to send message to %s and %s, msgq may has been killed"
- % (XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME))
+ except socket.error as err:
+ logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME)
else:
msg = create_command(ZONE_XFRIN_FAILED, param)
# catch the exception, in case msgq has been killed.
@@ -545,14 +777,13 @@ class Xfrin:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error as err:
- log_error("Fail to send message to %s, msgq may has been killed"
- % ZONE_MANAGER_MODULE_NAME)
+ logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
def startup(self):
while not self._shutdown_event.is_set():
self._cc_check_command()
- def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+ def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo, tsig_key,
check_soa = True):
if "pydnspp" not in sys.modules:
return (1, "xfrin failed, can't load dns message python library: 'pydnspp'")
@@ -567,11 +798,13 @@ class Xfrin:
xfrin_thread = threading.Thread(target = process_xfrin,
args = (self,
self.recorder,
- zone_name, rrclass,
+ zone_name.to_text(),
+ rrclass,
db_file,
self._shutdown_event,
master_addrinfo, check_soa,
- self._verbose))
+ self._verbose,
+ tsig_key))
xfrin_thread.start()
return (0, 'zone xfrin is started')
@@ -588,20 +821,6 @@ def set_signal_handler():
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
-def build_addr_info(addrstr, portstr):
- """
- Return tuple (family, socktype, sockaddr) for given address and port.
- IPv4 and IPv6 are the only supported addresses now, so sockaddr will be
- (address, port). The socktype is socket.SOCK_STREAM for now.
- """
- try:
- port = isc.net.parse.port_parse(portstr)
- addr = isc.net.parse.addr_parse(addrstr)
- return (addr.family, socket.SOCK_STREAM, (addrstr, port))
- except ValueError as err:
- raise XfrinException("failed to resolve master address/port=%s/%s: %s" %
- (addrstr, portstr, str(err)))
-
def set_cmd_options(parser):
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="display more about what is going on")
@@ -626,12 +845,11 @@ def main(xfrin_class, use_signal = True):
xfrind = xfrin_class(verbose = options.verbose)
xfrind.startup()
except KeyboardInterrupt:
- log_error("exit b10-xfrin")
+ logger.info(XFRIN_STOPPED_BY_KEYBOARD)
except isc.cc.session.SessionError as e:
- log_error(str(e))
- log_error('Error happened! is the command channel daemon running?')
+ logger.error(XFRIN_CC_SESSION_ERROR, str(e))
except Exception as e:
- log_error(str(e))
+ logger.error(XFRIN_UNKNOWN_ERROR, str(e))
if xfrind:
xfrind.shutdown()
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index 61ddaad..bc93720 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -9,16 +9,48 @@
"item_optional": false,
"item_default": 10
},
- {
- "item_name": "master_addr",
- "item_type": "string",
- "item_optional": false,
- "item_default": ""
- },
- { "item_name": "master_port",
- "item_type": "integer",
+ { "item_name": "zones",
+ "item_type": "list",
"item_optional": false,
- "item_default": 53
+ "item_default": [],
+ "list_item_spec":
+ { "item_type": "map",
+ "item_name": "zone_info",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "name",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ { "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "master_addr",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ { "item_name": "master_port",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 53
+ },
+ { "item_name": "tsig_key",
+ "item_type": "string",
+ "item_optional": true
+ },
+ { "item_name": "ixfr_disabled",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
new file mode 100644
index 0000000..80a0be3
--- /dev/null
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -0,0 +1,91 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+
+% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+
+% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
+The AXFR transfer of the given zone was successfully completed.
+
+% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
+The given master address is not a valid IP address.
+
+% XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1
+The master port as read from the configuration is not a valid port number.
+
+% XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFRIN_BAD_ZONE_CLASS Invalid zone class: %1
+The zone class as read from the configuration is not a valid DNS class.
+
+% XFRIN_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFRIN_COMMAND_ERROR error while executing command '%1': %2
+There was an error while the given command was being processed. The
+error is given in the log message.
+
+% XFRIN_CONNECT_MASTER error connecting to master at %1: %2
+There was an error opening a connection to the master. The error is
+shown in the log message.
+
+% XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+
+% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+
+% XFRIN_IMPORT_DNS error importing python DNS module: %1
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+
+% XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+
+% XFRIN_STARTING starting xfrin with command line '%1'
+An informational message, this is output when the xfrin daemon starts up.
+
+% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+
+% XFRIN_UNKNOWN_ERROR unknown error: %1
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index d4f021e..6100e64 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -7,10 +7,15 @@ pkglibexec_SCRIPTS = b10-xfrout
b10_xfroutdir = $(pkgdatadir)
b10_xfrout_DATA = xfrout.spec
-CLEANFILES= b10-xfrout xfrout.pyc xfrout.spec
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.pyc
man_MANS = b10-xfrout.8
-EXTRA_DIST = $(man_MANS) b10-xfrout.xml
+EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
if ENABLE_MAN
@@ -19,12 +24,21 @@ b10-xfrout.8: b10-xfrout.xml
endif
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py : xfrout_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrout_messages.mes
xfrout.spec: xfrout.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py
+b10-xfrout: xfrout.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..9889b80 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -134,6 +134,14 @@
data storage types.
</simpara></note>
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 11916af..ace8fc9 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -1,15 +1,17 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We set B10_FROM_BUILD below, so that the test can refer to the in-source
+# spec file.
check-local:
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -18,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
- $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 472ef3c..85979a0 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -18,10 +18,16 @@
import unittest
import os
+from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
+import isc.config
from pydnspp import *
from xfrout import *
import xfrout
+import isc.log
+import isc.acl.dns
+
+TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
# our fake socket, where we can read and insert messages
class MySocket():
@@ -85,17 +91,204 @@ class TestXfroutSession(unittest.TestCase):
msg.from_wire(self.mdata)
return msg
+ def create_mock_tsig_ctx(self, error):
+ # This helper function creates a MockTSIGContext for a given key
+ # and TSIG error to be used as a result of verify (normally faked
+ # one)
+ mock_ctx = MockTSIGContext(TSIG_KEY)
+ mock_ctx.error = error
+ return mock_ctx
+
+ def message_has_tsig(self, msg):
+ return msg.get_tsig_record() is not None
+
+ def create_request_data(self, with_tsig=False):
+ msg = Message(Message.RENDER)
+ query_id = 0x1035
+ msg.set_qid(query_id)
+ msg.set_opcode(Opcode.QUERY())
+ msg.set_rcode(Rcode.NOERROR())
+ query_question = Question(Name("example.com"), RRClass.IN(),
+ RRType.AXFR())
+ msg.add_question(query_question)
+
+ renderer = MessageRenderer()
+ if with_tsig:
+ tsig_ctx = MockTSIGContext(TSIG_KEY)
+ msg.to_wire(renderer, tsig_ctx)
+ else:
+ msg.to_wire(renderer)
+ request_data = renderer.get_data()
+ return request_data
+
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
- self.log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
- self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), self.log)
- self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+ self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
+ TSIGKeyRing(), ('127.0.0.1', 12345),
+ # When not testing ACLs, simply accept
+ isc.acl.dns.REQUEST_LOADER.load(
+ [{"action": "ACCEPT"}]),
+ {})
+ self.mdata = self.create_request_data(False)
self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
def test_parse_query_message(self):
[get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(get_rcode.to_text(), "NOERROR")
+ # tsig signed query message
+ request_data = self.create_request_data(True)
+ # BADKEY
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOTAUTH")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+ # NOERROR
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+
+ def check_transfer_acl(self, acl_setter):
+ # ACL checks, put some ACL inside
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {
+ "from": "127.0.0.1",
+ "action": "ACCEPT"
+ },
+ {
+ "from": "192.0.2.1",
+ "action": "DROP"
+ }
+ ]))
+ # Localhost (the default in this test) is accepted
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # This should be dropped completely, therefore returning None
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(None, rcode)
+ # This should be refused, therefore REFUSED
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # TSIG signed request
+ request_data = self.create_request_data(True)
+
+ # If the TSIG check fails, it should not check ACL
+ # (If it checked ACL as well, it would just drop the request)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._tsig_key_ring = TSIGKeyRing()
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOTAUTH")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+
+ # ACL using TSIG: successful case
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+
+ # ACL using TSIG: key name doesn't match; should be rejected
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # ACL using TSIG: no TSIG; should be rejected
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ #
+ # ACL using IP + TSIG: both should match
+ #
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
+ "action": "ACCEPT"},
+ {"action": "REJECT"}
+ ]))
+ # both matches
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # TSIG matches, but address doesn't
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Address matches, but TSIG doesn't (not included)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Neither address nor TSIG matches
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ def test_transfer_acl(self):
+ # ACL checks only with the default ACL
+ def acl_setter(acl):
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl(self):
+ # ACL check with a per zone ACL + default ACL. The per zone ACL
+ # should match the queryied zone, so it should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.com.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = acl
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl_nomatch(self):
+ # similar to the previous one, but the per zone doesn't match the
+ # query. The default should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.org.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = \
+ isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_get_transfer_acl(self):
+ # set the default ACL. If there's no specific zone ACL, this one
+ # should be used.
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "ACCEPT"}])
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ self.assertEqual(acl, self.xfrsess._acl)
+
+ # install a per zone config with transfer ACL for example.com. Then
+ # that ACL will be used for example.com; for others the default ACL
+ # will still be used.
+ com_acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "REJECT"}])
+ self.xfrsess._zone_config[('IN', 'example.com.')] = {}
+ self.xfrsess._zone_config[('IN', 'example.com.')]['transfer_acl'] = \
+ com_acl
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('example.com'),
+ RRClass.IN()))
+ self.assertEqual(self.xfrsess._acl,
+ self.xfrsess._get_transfer_acl(Name('example.org'),
+ RRClass.IN()))
+
+ # Name matching should be case insensitive.
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
+ RRClass.IN()))
+
def test_get_query_zone_name(self):
msg = self.getmsg()
self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
@@ -111,6 +304,14 @@ class TestXfroutSession(unittest.TestCase):
get_msg = self.sock.read_msg()
self.assertEqual(get_msg.get_rcode().to_text(), "NXDOMAIN")
+ # tsig signed message
+ msg = self.getmsg()
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ self.xfrsess._reply_query_with_error_rcode(msg, self.sock, Rcode(3))
+ get_msg = self.sock.read_msg()
+ self.assertEqual(get_msg.get_rcode().to_text(), "NXDOMAIN")
+ self.assertTrue(self.message_has_tsig(get_msg))
+
def test_send_message(self):
msg = self.getmsg()
msg.make_response()
@@ -146,12 +347,6 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(msg.get_rcode(), rcode)
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
- def test_reply_query_with_format_error(self):
- msg = self.getmsg()
- self.xfrsess._reply_query_with_format_error(msg, self.sock)
- get_msg = self.sock.read_msg()
- self.assertEqual(get_msg.get_rcode().to_text(), "FORMERR")
-
def test_create_rrset_from_db_record(self):
rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
self.assertEqual(rrset.get_name().to_text(), "example.com.")
@@ -162,11 +357,16 @@ class TestXfroutSession(unittest.TestCase):
def test_send_message_with_last_soa(self):
rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
-
msg = self.getmsg()
msg.make_response()
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, 0)
+
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, packet_neet_not_sign)
get_msg = self.sock.read_msg()
+ # tsig context does not exist
+ self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
@@ -180,6 +380,42 @@ class TestXfroutSession(unittest.TestCase):
rdata = answer.get_rdata()
self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+ # msg is the TSIG_SIGN_EVERY_NTH one
+ # sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, TSIG_SIGN_EVERY_NTH)
+ get_msg = self.sock.read_msg()
+ # tsig context does not exist
+ self.assertFalse(self.message_has_tsig(get_msg))
+
+ def test_send_message_with_last_soa_with_tsig(self):
+ # create tsig context
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+
+ rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
+ msg = self.getmsg()
+ msg.make_response()
+
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ # msg is not the TSIG_SIGN_EVERY_NTH one
+ # sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, packet_neet_not_sign)
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+
+ self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
+ self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+ self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
+
+ # msg is the TSIG_SIGN_EVERY_NTH one
+ # sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, TSIG_SIGN_EVERY_NTH)
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+
def test_trigger_send_message_with_last_soa(self):
rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
@@ -187,15 +423,21 @@ class TestXfroutSession(unittest.TestCase):
msg = self.getmsg()
msg.make_response()
-
msg.add_rrset(Message.SECTION_ANSWER, rrset_a)
- # give the function a value that is larger than MAX-len(rrset)
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, 65520)
+ # length larger than MAX-len(rrset)
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+
+ # give the function a value that is larger than MAX-len(rrset)
# this should have triggered the sending of two messages
# (1 with the rrset we added manually, and 1 that triggered
# the sending in _with_last_soa)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+ packet_neet_not_sign)
get_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
@@ -208,6 +450,7 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(rdata[0].to_text(), "192.0.2.1")
get_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 0)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
@@ -223,6 +466,45 @@ class TestXfroutSession(unittest.TestCase):
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
+ def test_trigger_send_message_with_last_soa_with_tsig(self):
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
+ msg = self.getmsg()
+ msg.make_response()
+ msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+
+ # length larger than MAX-len(rrset)
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+
+ # give the function a value that is larger than MAX-len(rrset)
+ # this should have triggered the sending of two messages
+ # (1 with the rrset we added manually, and 1 that triggered
+ # the sending in _with_last_soa)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+ packet_neet_not_sign)
+ get_msg = self.sock.read_msg()
+ # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
+ self.assertFalse(self.message_has_tsig(get_msg))
+ # the last packet should be tsig signed
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+ # and it should not have sent anything else
+ self.assertEqual(0, len(self.sock.sendqueue))
+
+
+ # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+ xfrout.TSIG_SIGN_EVERY_NTH)
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+ # the last packet should be tsig signed
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+ # and it should not have sent anything else
+ self.assertEqual(0, len(self.sock.sendqueue))
+
def test_get_rrset_len(self):
rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
self.assertEqual(82, get_rrset_len(rrset_soa))
@@ -313,9 +595,56 @@ class TestXfroutSession(unittest.TestCase):
reply_msg = self.sock.read_msg()
self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
-class MyCCSession():
+ def test_reply_xfrout_query_noerror_with_tsig(self):
+ rrset_data = (4, 3, 'a.example.com.', 'com.example.', 3600, 'A', None, '192.168.1.1')
+ global sqlite3_ds
+ global xfrout
+ def get_zone_soa(zonename, file):
+ return self.soa_record
+
+ def get_zone_datas(zone, file):
+ zone_rrsets = []
+ for i in range(0, 100):
+ zone_rrsets.insert(i, rrset_data)
+ return zone_rrsets
+
+ def get_rrset_len(rrset):
+ return 65520
+
+ sqlite3_ds.get_zone_soa = get_zone_soa
+ sqlite3_ds.get_zone_datas = get_zone_datas
+ xfrout.get_rrset_len = get_rrset_len
+
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+
+ # tsig signed first packet
+ reply_msg = self.sock.read_msg()
+ self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+ self.assertTrue(self.message_has_tsig(reply_msg))
+ # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
+ for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
+ reply_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(reply_msg))
+ # TSIG_SIGN_EVERY_NTH packet has tsig
+ reply_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(reply_msg))
+
+ for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
+ reply_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(reply_msg))
+ # tsig signed last packet
+ reply_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(reply_msg))
+
+ # and it should not have sent anything else
+ self.assertEqual(0, len(self.sock.sendqueue))
+
+class MyCCSession(isc.config.ConfigData):
def __init__(self):
- pass
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
def get_remote_config_value(self, module_name, identifier):
if module_name == "Auth" and identifier == "database_file":
@@ -326,18 +655,42 @@ class MyCCSession():
class MyUnixSockServer(UnixSockServer):
def __init__(self):
- self._lock = threading.Lock()
- self._transfers_counter = 0
self._shutdown_event = threading.Event()
- self._max_transfers_out = 10
+ self._common_init()
self._cc = MyCCSession()
- self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
+ self.update_config_data(self._cc.get_full_config())
class TestUnixSockServer(unittest.TestCase):
def setUp(self):
self.write_sock, self.read_sock = socket.socketpair()
self.unix = MyUnixSockServer()
+ def test_guess_remote(self):
+ """Test we can guess the remote endpoint when we have only the
+ file descriptor. This is needed, because we get only that one
+ from auth."""
+ # We test with UDP, as it can be "connected" without other
+ # endpoint
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.connect(('127.0.0.1', 12345))
+ self.assertEqual(('127.0.0.1', 12345),
+ self.unix._guess_remote(sock.fileno()))
+ if socket.has_ipv6:
+ # Don't check IPv6 address on hosts not supporting them
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+ sock.connect(('::1', 12345))
+ self.assertEqual(('::1', 12345, 0, 0),
+ self.unix._guess_remote(sock.fileno()))
+ # Try when pretending there's no IPv6 support
+ # (No need to pretend when there's really no IPv6)
+ xfrout.socket.has_ipv6 = False
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.connect(('127.0.0.1', 12345))
+ self.assertEqual(('127.0.0.1', 12345),
+ self.unix._guess_remote(sock.fileno()))
+ # Return it back
+ xfrout.socket.has_ipv6 = True
+
def test_receive_query_message(self):
send_msg = b"\xd6=\x00\x00\x00\x01\x00"
msg_len = struct.pack('H', socket.htons(len(send_msg)))
@@ -346,9 +699,121 @@ class TestUnixSockServer(unittest.TestCase):
recv_msg = self.unix._receive_query_message(self.read_sock)
self.assertEqual(recv_msg, send_msg)
- def test_updata_config_data(self):
+ def check_default_ACL(self):
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
+
+ def check_loaded_ACL(self, acl):
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.ACCEPT, acl.execute(context))
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("192.0.2.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.REJECT, acl.execute(context))
+
+ def test_update_config_data(self):
+ self.check_default_ACL()
+ tsig_key_str = 'example.com:SFuWd/q99SzF8Yzd1QbB9g=='
+ tsig_key_list = [tsig_key_str]
+ bad_key_list = ['bad..example.com:SFuWd/q99SzF8Yzd1QbB9g==']
self.unix.update_config_data({'transfers_out':10 })
self.assertEqual(self.unix._max_transfers_out, 10)
+ self.assertTrue(self.unix.tsig_key_ring is not None)
+ self.check_default_ACL()
+
+ self.unix.update_config_data({'transfers_out':9,
+ 'tsig_key_ring':tsig_key_list})
+ self.assertEqual(self.unix._max_transfers_out, 9)
+ self.assertEqual(self.unix.tsig_key_ring.size(), 1)
+ self.unix.tsig_key_ring.remove(Name("example.com."))
+ self.assertEqual(self.unix.tsig_key_ring.size(), 0)
+
+ # bad tsig key
+ config_data = {'transfers_out':9, 'tsig_key_ring': bad_key_list}
+ self.assertRaises(None, self.unix.update_config_data(config_data))
+ self.assertEqual(self.unix.tsig_key_ring.size(), 0)
+
+ # Load the ACL
+ self.unix.update_config_data({'transfer_acl': [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]})
+ self.check_loaded_ACL(self.unix._acl)
+ # Pass a wrong data there and check it does not replace the old one
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'transfer_acl': ['Something bad']})
+ self.check_loaded_ACL(self.unix._acl)
+
+ def test_zone_config_data(self):
+ # By default, there's no specific zone config
+ self.assertEqual({}, self.unix._zone_config)
+
+ # Adding config for a specific zone. The config is empty unless
+ # explicitly specified.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'class': 'IN'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class can be omitted
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class, name are stored in the "normalized" form. class
+ # strings are upper cased, names are down cased.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'EXAMPLE.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # invalid zone class, name will result in exceptions
+ self.assertRaises(EmptyLabel,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'bad..example'}]})
+ self.assertRaises(InvalidRRClass,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'class': 'badclass'}]})
+
+ # Configuring a couple of more zones
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'},
+ {'origin': 'example.com',
+ 'class': 'CH'},
+ {'origin': 'example.org'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('CH', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.org.')])
+
+ # Duplicate data: should be rejected with an exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com'},
+ {'origin': 'example.org'},
+ {'origin': 'example.com'}]})
+
+ def test_zone_config_data_with_acl(self):
+ # Similar to the previous test, but with transfer_acl config
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]}]})
+ acl = self.unix._zone_config[('IN', 'example.com.')]['transfer_acl']
+ self.check_loaded_ACL(acl)
+
+ # invalid ACL syntax will be rejected with exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'action': 'BADACTION'}]}]})
def test_get_db_file(self):
self.assertEqual(self.unix.get_db_file(), "initdb.file")
@@ -466,4 +931,5 @@ class TestInitialization(unittest.TestCase):
self.assertEqual(xfrout.UNIX_SOCKET_FILE, "The/Socket/File")
if __name__== "__main__":
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 17ca3eb..8049e29 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -26,7 +26,6 @@ from isc.datasrc import sqlite3_ds
from socketserver import *
import os
from isc.config.ccsession import *
-from isc.log.log import *
from isc.cc import SessionError, SessionTimeout
from isc.notify import notify_out
import isc.util.process
@@ -36,16 +35,36 @@ import errno
from optparse import OptionParser, OptionValueError
from isc.util import socketserver_mixin
+from isc.log_messages.xfrout_messages import *
+
+isc.log.init("b10-xfrout")
+logger = isc.log.Logger("xfrout")
+
try:
from libutil_io_python import *
from pydnspp import *
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrout process
# must keep running, so we warn about it and move forward.
- sys.stderr.write('[b10-xfrout] failed to import DNS or isc.util.io module: %s\n' % str(e))
+ log.error(XFROUT_IMPORT, str(e))
+
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
+from isc.acl.dns import REQUEST_LOADER
isc.util.process.rename()
+class XfroutConfigError(Exception):
+ """An exception indicating an error in updating xfrout configuration.
+
+ This exception is raised when the xfrout process encounters an error in
+ handling configuration updates. Not all syntax errors can be caught
+ at the module-CC layer, so xfrout needs to (explicitly or implicitly)
+ validate the given configuration data itself. When it finds an error
+ it raises this exception (either directly or by converting an exception
+ from other modules) as a unified error in configuration.
+ """
+ pass
+
def init_paths():
global SPECFILE_PATH
global AUTH_SPECFILE_PATH
@@ -72,9 +91,9 @@ init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
-MAX_TRANSFERS_OUT = 10
VERBOSE_MODE = False
-
+# tsig sign every N axfr packets.
+TSIG_SIGN_EVERY_NTH = 96
XFROUT_MAX_MESSAGE_SIZE = 65535
@@ -86,41 +105,103 @@ def get_rrset_len(rrset):
class XfroutSession():
- def __init__(self, sock_fd, request_data, server, log):
- # The initializer for the superclass may call functions
- # that need _log to be set, so we set it first
+ def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
+ default_acl, zone_config):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
- self._log = log
+ self._tsig_key_ring = tsig_key_ring
+ self._tsig_ctx = None
+ self._tsig_len = 0
+ self._remote = remote
+ self._acl = default_acl
+ self._zone_config = zone_config
self.handle()
+ def create_tsig_ctx(self, tsig_record, tsig_key_ring):
+ return TSIGContext(tsig_record.get_name(), tsig_record.get_rdata().get_algorithm(),
+ tsig_key_ring)
+
def handle(self):
''' Handle a xfrout query, send xfrout response '''
try:
self.dns_xfrout_start(self._sock_fd, self._request_data)
#TODO, avoid catching all exceptions
except Exception as e:
- self._log.log_message("error", str(e))
+ logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
+ pass
os.close(self._sock_fd)
+ def _check_request_tsig(self, msg, request_data):
+ ''' If request has a tsig record, perform tsig related checks '''
+ tsig_record = msg.get_tsig_record()
+ if tsig_record is not None:
+ self._tsig_len = tsig_record.get_length()
+ self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+ tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
+ if tsig_error != TSIGError.NOERROR:
+ return Rcode.NOTAUTH()
+
+ return Rcode.NOERROR()
+
def _parse_query_message(self, mdata):
''' parse query message to [socket,message]'''
#TODO, need to add parseHeader() in case the message header is invalid
try:
msg = Message(Message.PARSE)
Message.from_wire(msg, mdata)
- except Exception as err:
- self._log.log_message("error", str(err))
+ except Exception as err: # Exception is too broad
+ logger.error(XFROUT_PARSE_QUERY_ERROR, err)
return Rcode.FORMERR(), None
- return Rcode.NOERROR(), msg
+ # TSIG related checks
+ rcode = self._check_request_tsig(msg, mdata)
+
+ if rcode == Rcode.NOERROR():
+ # ACL checks
+ zone_name = msg.get_question()[0].get_name()
+ zone_class = msg.get_question()[0].get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote,
+ msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return None, None
+ elif acl_result == REJECT:
+ logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return Rcode.REFUSED(), msg
+
+ return rcode, msg
+
+ def _get_transfer_acl(self, zone_name, zone_class):
+ '''Return the ACL that should be applied for a given zone.
+
+ The zone is identified by a tuple of name and RR class.
+ If a per zone configuration for the zone exists and contains
+ transfer_acl, that ACL will be used; otherwise, the default
+ ACL will be used.
+
+ '''
+ # Internally zone names are managed in lower cased label characters,
+ # so we first need to convert the name.
+ zone_name_lower = Name(zone_name.to_text(), True)
+ config_key = (zone_class.to_text(), zone_name_lower.to_text())
+ if config_key in self._zone_config and \
+ 'transfer_acl' in self._zone_config[config_key]:
+ return self._zone_config[config_key]['transfer_acl']
+ return self._acl
def _get_query_zone_name(self, msg):
question = msg.get_question()[0]
return question.get_name().to_text()
+ def _get_query_zone_class(self, msg):
+ question = msg.get_question()[0]
+ return question.get_class().to_text()
def _send_data(self, sock_fd, data):
size = len(data)
@@ -130,32 +211,32 @@ class XfroutSession():
total_count += count
- def _send_message(self, sock_fd, msg):
+ def _send_message(self, sock_fd, msg, tsig_ctx=None):
render = MessageRenderer()
# As defined in RFC5936 section3.4, perform case-preserving name
# compression for AXFR message.
render.set_compress_mode(MessageRenderer.CASE_SENSITIVE)
render.set_length_limit(XFROUT_MAX_MESSAGE_SIZE)
- msg.to_wire(render)
+
+ # XXX Currently, python wrapper doesn't accept 'None' parameter in this case,
+ # we should remove the if statement and use a universal interface later.
+ if tsig_ctx is not None:
+ msg.to_wire(render, tsig_ctx)
+ else:
+ msg.to_wire(render)
+
header_len = struct.pack('H', socket.htons(render.get_length()))
self._send_data(sock_fd, header_len)
self._send_data(sock_fd, render.get_data())
def _reply_query_with_error_rcode(self, msg, sock_fd, rcode_):
- msg.make_response()
- msg.set_rcode(rcode_)
- self._send_message(sock_fd, msg)
-
-
- def _reply_query_with_format_error(self, msg, sock_fd):
- '''query message format isn't legal.'''
if not msg:
return # query message is invalid. send nothing back.
msg.make_response()
- msg.set_rcode(Rcode.FORMERR())
- self._send_message(sock_fd, msg)
+ msg.set_rcode(rcode_)
+ self._send_message(sock_fd, msg, self._tsig_ctx)
def _zone_has_soa(self, zone):
'''Judge if the zone has an SOA record.'''
@@ -204,22 +285,32 @@ class XfroutSession():
def dns_xfrout_start(self, sock_fd, msg_query):
rcode_, msg = self._parse_query_message(msg_query)
#TODO. create query message and parse header
- if rcode_ != Rcode.NOERROR():
- return self._reply_query_with_format_error(msg, sock_fd)
+ if rcode_ is None: # Dropped by ACL
+ return
+ elif rcode_ == Rcode.NOTAUTH() or rcode_ == Rcode.REFUSED():
+ return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
+ elif rcode_ != Rcode.NOERROR():
+ return self._reply_query_with_error_rcode(msg, sock_fd,
+ Rcode.FORMERR())
zone_name = self._get_query_zone_name(msg)
+ zone_class_str = self._get_query_zone_class(msg)
+ # TODO: should we not also include class in the check?
rcode_ = self._check_xfrout_available(zone_name)
+
if rcode_ != Rcode.NOERROR():
- self._log.log_message("info", "transfer of '%s/IN' failed: %s",
- zone_name, rcode_.to_text())
- return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
+ logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
+ zone_class_str, rcode_.to_text())
+ return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
- self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
+ logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
self._reply_xfrout_query(msg, sock_fd, zone_name)
- self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
except Exception as err:
- self._log.log_message("error", str(err))
+ logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
+ zone_class_str, str(err))
+ pass
+ logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
self._server.decrease_transfers_counter()
return
@@ -248,37 +339,43 @@ class XfroutSession():
rrset_.add_rdata(rdata_)
return rrset_
- def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len):
+ def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
+ count_since_last_tsig_sign):
'''Add the SOA record to the end of message. If it can't be
added, a new message should be created to send out the last soa .
'''
rrset_len = get_rrset_len(rrset_soa)
- if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- else:
+ if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
+ message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
+            # If a TSIG context exists, sign the packet with serial number TSIG_SIGN_EVERY_NTH
+ self._send_message(sock_fd, msg, self._tsig_ctx)
+ msg = self._clear_message(msg)
+ elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
+ message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
self._send_message(sock_fd, msg)
msg = self._clear_message(msg)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- self._send_message(sock_fd, msg)
+        # If a TSIG context exists, sign the last packet
+ msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ self._send_message(sock_fd, msg, self._tsig_ctx)
def _reply_xfrout_query(self, msg, sock_fd, zone_name):
#TODO, there should be a better way to insert rrset.
+ count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
msg.make_response()
msg.set_header_flag(Message.HEADERFLAG_AA)
soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
rrset_soa = self._create_rrset_from_db_record(soa_record)
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- message_upper_len = get_rrset_len(rrset_soa)
+ message_upper_len = get_rrset_len(rrset_soa) + self._tsig_len
for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
if self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
- self._log.log_message("info", "xfrout process is being shutdown")
+ logger.info(XFROUT_STOPPING)
return
-
# TODO: RRType.SOA() ?
if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
continue
@@ -294,28 +391,48 @@ class XfroutSession():
message_upper_len += rrset_len
continue
- self._send_message(sock_fd, msg)
+            # If a TSIG context exists, sign every N packets
+ if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
+ count_since_last_tsig_sign = 0
+ self._send_message(sock_fd, msg, self._tsig_ctx)
+ else:
+ self._send_message(sock_fd, msg)
+
+ count_since_last_tsig_sign += 1
msg = self._clear_message(msg)
msg.add_rrset(Message.SECTION_ANSWER, rrset_) # Add the rrset to the new message
- message_upper_len = rrset_len
- self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len)
+ # Reserve tsig space for signed packet
+ if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
+ message_upper_len = rrset_len + self._tsig_len
+ else:
+ message_upper_len = rrset_len
-class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
+ self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
+ count_since_last_tsig_sign)
+
+class UnixSockServer(socketserver_mixin.NoPollMixIn,
+ ThreadingUnixStreamServer):
'''The unix domain socket server which accept xfr query sent from auth server.'''
- def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc, log):
+ def __init__(self, sock_file, handle_class, shutdown_event, config_data,
+ cc):
self._remove_unused_sock_file(sock_file)
self._sock_file = sock_file
socketserver_mixin.NoPollMixIn.__init__(self)
ThreadingUnixStreamServer.__init__(self, sock_file, handle_class)
- self._lock = threading.Lock()
- self._transfers_counter = 0
self._shutdown_event = shutdown_event
self._write_sock, self._read_sock = socket.socketpair()
- self._log = log
- self.update_config_data(config_data)
+ self._common_init()
self._cc = cc
+ self.update_config_data(config_data)
+
+ def _common_init(self):
+ '''Initialization shared with the mock server class used for tests'''
+ self._lock = threading.Lock()
+ self._transfers_counter = 0
+ self._zone_config = {}
+ self._acl = None # this will be initialized in update_config_data()
def _receive_query_message(self, sock):
''' receive request message from sock'''
@@ -341,7 +458,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
request, client_address = self.get_request()
except socket.error:
- self._log.log_message("error", "Failed to fetch request")
+ logger.error(XFROUT_FETCH_REQUEST_ERROR)
return
# Check self._shutdown_event to ensure the real shutdown comes.
@@ -355,7 +472,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
(rlist, wlist, xlist) = ([], [], [])
continue
else:
- self._log.log_message("error", "Error with select(): %s" %e)
+ logger.error(XFROUT_SOCKET_SELECT_ERROR, str(e))
break
# self.server._shutdown_event will be set by now, if it is not a false
@@ -365,9 +482,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
self.process_request(request)
- except:
- self._log.log_message("error", "Exception happened during processing of %s"
- % str(client_address))
+ except Exception as pre:
+                logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
break
def _handle_request_noblock(self):
@@ -386,7 +502,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
# xfrout unix socket server, to check whether there is another
# xfrout running.
if sock_fd == FD_COMM_ERROR:
- self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
+ logger.error(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR)
return
# receive request msg
@@ -394,16 +510,41 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
if not request_data:
return
- t = threading.Thread(target = self.finish_request,
+ t = threading.Thread(target=self.finish_request,
args = (sock_fd, request_data))
if self.daemon_threads:
t.daemon = True
t.start()
+ def _guess_remote(self, sock_fd):
+ """
+ Guess remote address and port of the socket. The sock_fd must be a
+ socket
+ """
+ # This uses a trick. If the socket is IPv4 in reality and we pretend
+ # it to be IPv6, it returns IPv4 address anyway. This doesn't seem
+ # to care about the SOCK_STREAM parameter at all (which it really is,
+ # except for testing)
+ if socket.has_ipv6:
+ sock = socket.fromfd(sock_fd, socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ # To make it work even on hosts without IPv6 support
+ # (Any idea how to simulate this in test?)
+ sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
+ return sock.getpeername()
def finish_request(self, sock_fd, request_data):
- '''Finish one request by instantiating RequestHandlerClass.'''
- self.RequestHandlerClass(sock_fd, request_data, self, self._log)
+ '''Finish one request by instantiating RequestHandlerClass.
+
+ This method creates a XfroutSession object.
+ '''
+ self._lock.acquire()
+ acl = self._acl
+ zone_config = self._zone_config
+ self._lock.release()
+ self.RequestHandlerClass(sock_fd, request_data, self,
+ self.tsig_key_ring,
+ self._guess_remote(sock_fd), acl, zone_config)
def _remove_unused_sock_file(self, sock_file):
'''Try to remove the socket file. If the file is being used
@@ -411,8 +552,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
If it's not a socket file or nobody is listening
, it will be removed. If it can't be removed, exit from python. '''
if self._sock_file_in_use(sock_file):
- self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
- " is being used by another xfrout process\n" % sock_file)
+ logger.error(XFROUT_UNIX_SOCKET_FILE_IN_USE, sock_file)
sys.exit(0)
else:
if not os.path.exists(sock_file):
@@ -421,7 +561,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
os.unlink(sock_file)
except OSError as err:
- self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
+ logger.error(XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR, sock_file, str(err))
sys.exit(0)
def _sock_file_in_use(self, sock_file):
@@ -442,16 +582,83 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
os.unlink(self._sock_file)
except Exception as e:
- self._log.log_message('error', str(e))
+ logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
+ pass
def update_config_data(self, new_config):
- '''Apply the new config setting of xfrout module. '''
- self._log.log_message('info', 'update config data start.')
+ '''Apply the new config setting of xfrout module.
+
+ '''
self._lock.acquire()
- self._max_transfers_out = new_config.get('transfers_out')
- self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
+ try:
+ logger.info(XFROUT_NEW_CONFIG)
+ new_acl = self._acl
+ if 'transfer_acl' in new_config:
+ try:
+ new_acl = REQUEST_LOADER.load(new_config['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl: ' +
+ str(e))
+
+ new_zone_config = self._zone_config
+ zconfig_data = new_config.get('zone_config')
+ if zconfig_data is not None:
+ new_zone_config = self.__create_zone_config(zconfig_data)
+
+ self._acl = new_acl
+ self._zone_config = new_zone_config
+ self._max_transfers_out = new_config.get('transfers_out')
+ self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ except Exception as e:
+ self._lock.release()
+ raise e
self._lock.release()
- self._log.log_message('info', 'update config data complete.')
+ logger.info(XFROUT_NEW_CONFIG_DONE)
+
+ def __create_zone_config(self, zone_config_list):
+ new_config = {}
+ for zconf in zone_config_list:
+ # convert the class, origin (name) pair. First build pydnspp
+ # object to reject invalid input.
+ zclass_str = zconf.get('class')
+ if zclass_str is None:
+ #zclass_str = 'IN' # temporary
+ zclass_str = self._cc.get_default_value('zone_config/class')
+ zclass = RRClass(zclass_str)
+ zorigin = Name(zconf['origin'], True)
+ config_key = (zclass.to_text(), zorigin.to_text())
+
+ # reject duplicate config
+ if config_key in new_config:
+ raise XfroutConfigError('Duplicate zone_config for ' +
+ str(zorigin) + '/' + str(zclass))
+
+ # create a new config entry, build any given (and known) config
+ new_config[config_key] = {}
+ if 'transfer_acl' in zconf:
+ try:
+ new_config[config_key]['transfer_acl'] = \
+ REQUEST_LOADER.load(zconf['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl ' +
+ 'for ' + zorigin.to_text() + '/' +
+ zclass_str + ': ' + str(e))
+ return new_config
+
+ def set_tsig_key_ring(self, key_list):
+        """Set the tsig_key_ring, given a TSIG key string list representation."""
+
+ # XXX add values to configure zones/tsig options
+ self.tsig_key_ring = TSIGKeyRing()
+        # If the key string list is empty, create an empty tsig_key_ring
+ if not key_list:
+ return
+
+ for key_item in key_list:
+ try:
+ self.tsig_key_ring.add(TSIGKey(key_item))
+ except InvalidParameter as ipe:
+ logger.error(XFROUT_BAD_TSIG_KEY_STRING, str(key_item))
def get_db_file(self):
file, is_default = self._cc.get_remote_config_value("Auth", "database_file")
@@ -483,30 +690,28 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
class XfroutServer:
def __init__(self):
self._unix_socket_server = None
- self._log = None
self._listen_sock_file = UNIX_SOCKET_FILE
self._shutdown_event = threading.Event()
self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler)
self._config_data = self._cc.get_full_config()
self._cc.start()
self._cc.add_remote_config(AUTH_SPECFILE_LOCATION);
- self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
- self._config_data.get('log_severity'), self._config_data.get('log_versions'),
- self._config_data.get('log_max_bytes'), True)
self._start_xfr_query_listener()
self._start_notifier()
def _start_xfr_query_listener(self):
'''Start a new thread to accept xfr query. '''
- self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
- self._shutdown_event, self._config_data,
- self._cc, self._log);
+ self._unix_socket_server = UnixSockServer(self._listen_sock_file,
+ XfroutSession,
+ self._shutdown_event,
+ self._config_data,
+ self._cc)
listener = threading.Thread(target=self._unix_socket_server.serve_forever)
listener.start()
def _start_notifier(self):
datasrc = self._unix_socket_server.get_db_file()
- self._notifier = notify_out.NotifyOut(datasrc, self._log)
+ self._notifier = notify_out.NotifyOut(datasrc)
self._notifier.dispatcher()
def send_notify(self, zone_name, zone_class):
@@ -521,11 +726,13 @@ class XfroutServer:
continue
self._config_data[key] = new_config[key]
- if self._log:
- self._log.update_config(new_config)
-
if self._unix_socket_server:
- self._unix_socket_server.update_config_data(self._config_data)
+ try:
+ self._unix_socket_server.update_config_data(self._config_data)
+ except Exception as e:
+ answer = create_answer(1,
+ "Failed to handle new configuration: " +
+ str(e))
return answer
@@ -551,7 +758,7 @@ class XfroutServer:
def command_handler(self, cmd, args):
if cmd == "shutdown":
- self._log.log_message("info", "Received shutdown command.")
+ logger.info(XFROUT_RECEIVED_SHUTDOWN_COMMAND)
self.shutdown()
answer = create_answer(0)
@@ -559,8 +766,7 @@ class XfroutServer:
zone_name = args.get('zone_name')
zone_class = args.get('zone_class')
if zone_name and zone_class:
- self._log.log_message("info", "zone '%s/%s': receive notify others command" \
- % (zone_name, zone_class))
+ logger.info(XFROUT_NOTIFY_COMMAND, zone_name, zone_class)
self.send_notify(zone_name, zone_class)
answer = create_answer(0)
else:
@@ -603,15 +809,15 @@ if '__main__' == __name__:
xfrout_server = XfroutServer()
xfrout_server.run()
except KeyboardInterrupt:
- sys.stderr.write("[b10-xfrout] exit xfrout process\n")
+        logger.info(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
- sys.stderr.write("[b10-xfrout] Error creating xfrout, "
- "is the command channel daemon running?\n")
- except SessionTimeout as e:
- sys.stderr.write("[b10-xfrout] Error creating xfrout, "
- "is the configuration manager running?\n")
+ logger.error(XFROUT_CC_SESSION_ERROR, str(e))
except ModuleCCSessionError as e:
- sys.stderr.write("[b10-xfrout] exit xfrout process:%s\n" % str(e))
+ logger.error(XFROUT_MODULECC_SESSION_ERROR, str(e))
+ except XfroutConfigError as e:
+ logger.error(XFROUT_CONFIG_ERROR, str(e))
+ except SessionTimeout as e:
+ logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
if xfrout_server:
xfrout_server.shutdown()
diff --git a/src/bin/xfrout/xfrout.spec.pre.in b/src/bin/xfrout/xfrout.spec.pre.in
index 941db72..0891a57 100644
--- a/src/bin/xfrout/xfrout.spec.pre.in
+++ b/src/bin/xfrout/xfrout.spec.pre.in
@@ -16,27 +16,90 @@
},
{
"item_name": "log_file",
- "item_type": "string",
+ "item_type": "string",
"item_optional": false,
"item_default": "@@LOCALSTATEDIR@@/@PACKAGE@/log/Xfrout.log"
},
{
"item_name": "log_severity",
- "item_type": "string",
+ "item_type": "string",
"item_optional": false,
- "item_default": "debug"
+ "item_default": "debug"
},
{
"item_name": "log_versions",
- "item_type": "integer",
+ "item_type": "integer",
"item_optional": false,
- "item_default": 5
+ "item_default": 5
},
{
"item_name": "log_max_bytes",
- "item_type": "integer",
+ "item_type": "integer",
"item_optional": false,
- "item_default": 1048576
+ "item_default": 1048576
+ },
+ {
+ "item_name": "tsig_key_ring",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec" :
+ {
+ "item_name": "tsig_key",
+ "item_type": "string",
+ "item_optional": true
+ }
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ },
+ {
+ "item_name": "zone_config",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec":
+ {
+ "item_name": "zone_config_element",
+ "item_type": "map",
+ "item_optional": true,
+ "item_default": { "origin": "" },
+ "map_item_spec": [
+ {
+ "item_name": "origin",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
new file mode 100644
index 0000000..b2e432c
--- /dev/null
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -0,0 +1,162 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrout messages python module.
+
+% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
+A transfer out of the given zone has started.
+
+% XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFROUT_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+
+% XFROUT_CONFIG_ERROR error found in configuration data: %1
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+
+% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+
+% XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shutdown.
+
+% XFROUT_HANDLE_QUERY_ERROR error while handling query: %1
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+
+% XFROUT_IMPORT error importing python module: %1
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+
+% XFROUT_NEW_CONFIG Update xfrout configuration
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+
+% XFROUT_NEW_CONFIG_DONE Update xfrout configuration done
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+
+% XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+
+% XFROUT_PARSE_QUERY_ERROR error parsing query: %1
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+
+% XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+
+% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
+The xfrout process silently dropped a request to transfer zone to given host.
+This is required by the ACLs. The %1 and %2 represent the zone name and class,
+the %3 and %4 the IP address and port of the peer requesting the transfer.
+
+% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
+given host. This is because of ACLs. The %1 and %2 represent the zone name and
+class, the %3 and %4 the IP address and port of the peer requesting the
+transfer.
+
+% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+
+% XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+
+% XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+
+% XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is show in the log message. The xfrout
+daemon will shut down.
+
+% XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+
+% XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+
+% XFROUT_STOPPING the xfrout daemon is shutting down
+The current transfer is aborted, as the xfrout daemon is shutting down.
+
+% XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 410279a..aa427fd 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -7,10 +7,15 @@ pkglibexec_SCRIPTS = b10-zonemgr
b10_zonemgrdir = $(pkgdatadir)
b10_zonemgr_DATA = zonemgr.spec
-CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.pyc
man_MANS = b10-zonemgr.8
-EXTRA_DIST = $(man_MANS) b10-zonemgr.xml
+EXTRA_DIST = $(man_MANS) b10-zonemgr.xml zonemgr_messages.mes
if ENABLE_MAN
@@ -19,10 +24,20 @@ b10-zonemgr.8: b10-zonemgr.xml
endif
+# Build logging source file from message files
+$(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py : zonemgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/zonemgr_messages.mes
+
zonemgr.spec: zonemgr.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.spec.pre >$@
-b10-zonemgr: zonemgr.py
+b10-zonemgr: zonemgr.py $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/zonemgr/b10-zonemgr.8 b/src/bin/zonemgr/b10-zonemgr.8
index fbd0602..bfc0a7b 100644
--- a/src/bin/zonemgr/b10-zonemgr.8
+++ b/src/bin/zonemgr/b10-zonemgr.8
@@ -2,12 +2,12 @@
.\" Title: b10-zonemgr
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: October 18, 2010
+.\" Date: May 19, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-ZONEMGR" "8" "October 18, 2010" "BIND10" "BIND10"
+.TH "B10\-ZONEMGR" "8" "May 19, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -46,11 +46,6 @@ receives its configurations from
The configurable settings are:
.PP
-\fIjitter_scope\fR
-defines the random jitter range subtracted from the refresh and retry timers to avoid many zones from refreshing at the same time\&. The refresh or retry time actually used is a random time between the defined refresh or retry time and it multiplied by the
-\fIjitter_scope\fR\&. This is re\-evaluated after each refresh or retry\&. This value is a real number and the maximum is 0\&.5 (half of the refresh or retry time)\&. The default is 0\&.25\&. Set to 0 to disable the jitter\&.
-.PP
-
\fIlowerbound_refresh\fR
defines the minimum SOA REFRESH time in seconds\&. The default is 10\&.
.PP
@@ -59,10 +54,36 @@ defines the minimum SOA REFRESH time in seconds\&. The default is 10\&.
defines the minimum SOA RETRY time in seconds\&. The default is 5\&.
.PP
+\fIrefresh_jitter\fR
+This value is a real number\&. The maximum amount is 0\&.5\&. The default is 0\&.25\&.
+.PP
+
+\fIreload_jitter\fR
+This value is a real number\&. The default is 0\&.75\&.
+.PP
+
\fImax_transfer_timeout\fR
defines the maximum amount of time in seconds for a transfer\&.
The default is 14400 (4 hours)\&.
.PP
+
+\fIsecondary_zones\fR
+is a list of slave zones that the
+\fBb10\-zonemgr\fR
+should keep timers for\&. The list items include the
+\fIname\fR
+(which defines the zone name) and the
+\fIclass\fR
+(which defaults to
+\(lqIN\(rq)\&.
+.PP
+(A deprecated configuration is
+\fIjitter_scope\fR
+which is superseded by
+\fIrefresh_jitter\fR
+and
+\fIreload_jitter\fR\&.)
+.PP
The configuration commands are:
.PP
@@ -107,5 +128,5 @@ The
daemon was designed in July 2010 by CNNIC for the ISC BIND 10 project\&.
.SH "COPYRIGHT"
.br
-Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
+Copyright \(co 2010-2011 Internet Systems Consortium, Inc. ("ISC")
.br
diff --git a/src/bin/zonemgr/b10-zonemgr.xml b/src/bin/zonemgr/b10-zonemgr.xml
index 4d796ee..00f5d04 100644
--- a/src/bin/zonemgr/b10-zonemgr.xml
+++ b/src/bin/zonemgr/b10-zonemgr.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>October 18, 2010</date>
+ <date>May 19, 2011</date>
</refentryinfo>
<refmeta>
@@ -36,7 +36,7 @@
<docinfo>
<copyright>
- <year>2010</year>
+ <year>2010-2011</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
@@ -92,6 +92,39 @@
<para>
The configurable settings are:
</para>
+
+ <para>
+ <varname>lowerbound_refresh</varname>
+ defines the minimum SOA REFRESH time in seconds.
+ The default is 10.
+ </para>
+
+ <para>
+ <varname>lowerbound_retry</varname>
+ defines the minimum SOA RETRY time in seconds.
+ The default is 5.
+ </para>
+
+ <para>
+ <varname>refresh_jitter</varname>
+ This value is a real number.
+ The maximum amount is 0.5.
+ The default is 0.25.
+ </para>
+<!-- TODO: needs to be documented -->
+<!-- TODO: Set to 0 to disable the jitter. -->
+
+ <para>
+ <varname>reload_jitter</varname>
+ This value is a real number.
+ The default is 0.75.
+ </para>
+<!-- TODO: needs to be documented -->
+<!-- TODO: Set to 0 to disable the jitter. -->
+<!-- what does 0 do? -->
+<!-- TODO: no max? -->
+
+<!-- TODO: remove this. This is old removed config
<para>
<varname>jitter_scope</varname>
defines the random jitter range subtracted from the refresh
@@ -106,16 +139,8 @@
The default is 0.25.
Set to 0 to disable the jitter.
</para>
- <para>
- <varname>lowerbound_refresh</varname>
- defines the minimum SOA REFRESH time in seconds.
- The default is 10.
- </para>
- <para>
- <varname>lowerbound_retry</varname>
- defines the minimum SOA RETRY time in seconds.
- The default is 5.
- </para>
+-->
+
<para>
<varname>max_transfer_timeout</varname>
defines the maximum amount of time in seconds for a transfer.
@@ -123,6 +148,21 @@
The default is 14400 (4 hours).
</para>
+<!-- TODO: this duplicates list in Xfrin too -->
+ <para>
+ <varname>secondary_zones</varname> is a list of slave zones
+ that the <command>b10-zonemgr</command> should keep timers for.
+ The list items include the <varname>name</varname> (which
+ defines the zone name) and the <varname>class</varname>
+ (which defaults to <quote>IN</quote>).
+ </para>
+
+ <para>
+ (A deprecated configuration is <varname>jitter_scope</varname>
+      which is superseded by <varname>refresh_jitter</varname>
+ and <varname>reload_jitter</varname>.)
+ </para>
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 496c1a4..769d332 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = zonemgr_test.py
EXTRA_DIST = $(PYTESTS)
CLEANFILES = initdb.file
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -12,6 +19,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py
index c6d151d..80e41b3 100644
--- a/src/bin/zonemgr/tests/zonemgr_test.py
+++ b/src/bin/zonemgr/tests/zonemgr_test.py
@@ -21,11 +21,12 @@ import os
import tempfile
from zonemgr import *
-ZONE_NAME_CLASS1_IN = ("sd.cn.", "IN")
-ZONE_NAME_CLASS2_CH = ("tw.cn.", "CH")
-ZONE_NAME_CLASS3_IN = ("example.com", "IN")
-ZONE_NAME_CLASS1_CH = ("sd.cn.", "CH")
-ZONE_NAME_CLASS2_IN = ("tw.cn.", "IN")
+ZONE_NAME_CLASS1_IN = ("example.net.", "IN")
+ZONE_NAME_CLASS1_CH = ("example.net.", "CH")
+ZONE_NAME_CLASS2_IN = ("example.org.", "IN")
+ZONE_NAME_CLASS2_CH = ("example.org.", "CH")
+ZONE_NAME_CLASS3_IN = ("example.com.", "IN")
+ZONE_NAME_CLASS3_CH = ("example.com.", "CH")
MAX_TRANSFER_TIMEOUT = 14400
LOWERBOUND_REFRESH = 10
@@ -80,12 +81,12 @@ class MyZonemgrRefresh(ZonemgrRefresh):
self._refresh_jitter = 0.25
def get_zone_soa(zone_name, db_file):
- if zone_name == 'sd.cn.':
- return (1, 2, 'sd.cn.', 'cn.sd.', 21600, 'SOA', None,
- 'a.dns.cn. root.cnnic.cn. 2009073106 7200 3600 2419200 21600')
- elif zone_name == 'tw.cn.':
- return (1, 2, 'tw.cn.', 'cn.sd.', 21600, 'SOA', None,
- 'a.dns.cn. root.cnnic.cn. 2009073112 7200 3600 2419200 21600')
+ if zone_name == 'example.net.':
+ return (1, 2, 'example.net.', 'example.net.sd.', 21600, 'SOA', None,
+ 'a.example.net. root.example.net. 2009073106 7200 3600 2419200 21600')
+ elif zone_name == 'example.org.':
+ return (1, 2, 'example.org.', 'example.org.sd.', 21600, 'SOA', None,
+ 'a.example.org. root.example.org. 2009073112 7200 3600 2419200 21600')
else:
return None
sqlite3_ds.get_zone_soa = get_zone_soa
@@ -94,15 +95,15 @@ class MyZonemgrRefresh(ZonemgrRefresh):
self._slave_socket, FakeConfig())
current_time = time.time()
self._zonemgr_refresh_info = {
- ('sd.cn.', 'IN'): {
+ ('example.net.', 'IN'): {
'last_refresh_time': current_time,
'next_refresh_time': current_time + 6500,
- 'zone_soa_rdata': 'a.dns.cn. root.cnnic.cn. 2009073105 7200 3600 2419200 21600',
+ 'zone_soa_rdata': 'a.example.net. root.example.net. 2009073105 7200 3600 2419200 21600',
'zone_state': 0},
- ('tw.cn.', 'CH'): {
+ ('example.org.', 'CH'): {
'last_refresh_time': current_time,
'next_refresh_time': current_time + 6900,
- 'zone_soa_rdata': 'a.dns.cn. root.cnnic.cn. 2009073112 7200 3600 2419200 21600',
+ 'zone_soa_rdata': 'a.example.org. root.example.org. 2009073112 7200 3600 2419200 21600',
'zone_state': 0}
}
@@ -151,12 +152,23 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue((time1 + 3600 * (1 - self.zone_refresh._refresh_jitter)) <= zone_timeout)
self.assertTrue(zone_timeout <= time2 + 3600)
+ # No soa rdata
+ self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_soa_rdata"] = None
+ time3 = time.time()
+ self.zone_refresh._set_zone_retry_timer(ZONE_NAME_CLASS1_IN)
+ zone_timeout = self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["next_refresh_time"]
+ time4 = time.time()
+ self.assertTrue((time3 + self.zone_refresh._lowerbound_retry * (1 - self.zone_refresh._refresh_jitter))
+ <= zone_timeout)
+ self.assertTrue(zone_timeout <= time4 + self.zone_refresh._lowerbound_retry)
+
def test_zone_not_exist(self):
self.assertFalse(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_IN))
self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_CH))
self.assertFalse(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS2_CH))
self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS2_IN))
self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS3_IN))
+ self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS3_CH))
def test_set_zone_notify_timer(self):
time1 = time.time()
@@ -179,20 +191,20 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue(self.zone_refresh._zone_is_expired(ZONE_NAME_CLASS1_IN))
def test_get_zone_soa_rdata(self):
- soa_rdata1 = 'a.dns.cn. root.cnnic.cn. 2009073105 7200 3600 2419200 21600'
- soa_rdata2 = 'a.dns.cn. root.cnnic.cn. 2009073112 7200 3600 2419200 21600'
+ soa_rdata1 = 'a.example.net. root.example.net. 2009073105 7200 3600 2419200 21600'
+ soa_rdata2 = 'a.example.org. root.example.org. 2009073112 7200 3600 2419200 21600'
self.assertEqual(soa_rdata1, self.zone_refresh._get_zone_soa_rdata(ZONE_NAME_CLASS1_IN))
self.assertRaises(KeyError, self.zone_refresh._get_zone_soa_rdata, ZONE_NAME_CLASS1_CH)
self.assertEqual(soa_rdata2, self.zone_refresh._get_zone_soa_rdata(ZONE_NAME_CLASS2_CH))
self.assertRaises(KeyError, self.zone_refresh._get_zone_soa_rdata, ZONE_NAME_CLASS2_IN)
def test_zonemgr_reload_zone(self):
- soa_rdata = 'a.dns.cn. root.cnnic.cn. 2009073106 1800 900 2419200 21600'
+ soa_rdata = 'a.example.net. root.example.net. 2009073106 1800 900 2419200 21600'
# We need to restore this not to harm other tests
old_get_zone_soa = sqlite3_ds.get_zone_soa
def get_zone_soa(zone_name, db_file):
- return (1, 2, 'sd.cn.', 'cn.sd.', 21600, 'SOA', None,
- 'a.dns.cn. root.cnnic.cn. 2009073106 1800 900 2419200 21600')
+ return (1, 2, 'example.net.', 'example.net.sd.', 21600, 'SOA', None,
+ 'a.example.net. root.example.net. 2009073106 1800 900 2419200 21600')
sqlite3_ds.get_zone_soa = get_zone_soa
self.zone_refresh.zonemgr_reload_zone(ZONE_NAME_CLASS1_IN)
@@ -274,15 +286,15 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue(self.zone_refresh._zone_mgr_is_empty())
def test_zonemgr_add_zone(self):
- soa_rdata = 'a.dns.cn. root.cnnic.cn. 2009073106 1800 900 2419200 21600'
+ soa_rdata = 'a.example.net. root.example.net. 2009073106 1800 900 2419200 21600'
# This needs to be restored. The following test actually failed if we left
# this unclean
old_get_zone_soa = sqlite3_ds.get_zone_soa
time1 = time.time()
def get_zone_soa(zone_name, db_file):
- return (1, 2, 'sd.cn.', 'cn.sd.', 21600, 'SOA', None,
- 'a.dns.cn. root.cnnic.cn. 2009073106 1800 900 2419200 21600')
+ return (1, 2, 'example.net.', 'example.net.sd.', 21600, 'SOA', None,
+ 'a.example.net. root.example.net. 2009073106 1800 900 2419200 21600')
sqlite3_ds.get_zone_soa = get_zone_soa
@@ -302,8 +314,8 @@ class TestZonemgrRefresh(unittest.TestCase):
def get_zone_soa2(zone_name, db_file):
return None
sqlite3_ds.get_zone_soa = get_zone_soa2
- self.assertRaises(ZonemgrException, self.zone_refresh.zonemgr_add_zone, \
- ZONE_NAME_CLASS1_IN)
+ self.zone_refresh.zonemgr_add_zone(ZONE_NAME_CLASS2_IN)
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS2_IN]["zone_soa_rdata"] is None)
sqlite3_ds.get_zone_soa = old_get_zone_soa
def test_zone_handle_notify(self):
@@ -314,15 +326,15 @@ class TestZonemgrRefresh(unittest.TestCase):
current_time = time.time()
self.assertTrue(zone_timeout <= current_time)
self.assertRaises(ZonemgrException, self.zone_refresh.zone_handle_notify,\
- ("org.cn.", "IN"), "127.0.0.1")
+ ZONE_NAME_CLASS3_CH, "127.0.0.1")
self.assertRaises(ZonemgrException, self.zone_refresh.zone_handle_notify,\
ZONE_NAME_CLASS3_IN, "127.0.0.1")
def test_zone_refresh_success(self):
- soa_rdata = 'a.dns.cn. root.cnnic.cn. 2009073106 1800 900 2419200 21600'
+ soa_rdata = 'a.example.net. root.example.net. 2009073106 1800 900 2419200 21600'
def get_zone_soa(zone_name, db_file):
- return (1, 2, 'sd.cn.', 'cn.sd.', 21600, 'SOA', None,
- 'a.dns.cn. root.cnnic.cn. 2009073106 1800 900 2419200 21600')
+ return (1, 2, 'example.net.', 'example.net.sd.', 21600, 'SOA', None,
+ 'a.example.net. root.example.net. 2009073106 1800 900 2419200 21600')
sqlite3_ds.get_zone_soa = get_zone_soa
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"] = ZONE_REFRESHING
@@ -337,11 +349,11 @@ class TestZonemgrRefresh(unittest.TestCase):
last_refresh_time = self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["last_refresh_time"]
self.assertTrue(time1 <= last_refresh_time)
self.assertTrue(last_refresh_time <= time2)
- self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_success, ("org.cn.", "CH"))
+ self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_success, ("example.test.", "CH"))
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_success, ZONE_NAME_CLASS3_IN)
def test_zone_refresh_fail(self):
- soa_rdata = 'a.dns.cn. root.cnnic.cn. 2009073105 7200 3600 2419200 21600'
+ soa_rdata = 'a.example.net. root.example.net. 2009073105 7200 3600 2419200 21600'
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"] = ZONE_REFRESHING
self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
@@ -357,22 +369,31 @@ class TestZonemgrRefresh(unittest.TestCase):
self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
self.assertEqual(ZONE_EXPIRED, self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"])
- self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ("org.cn.", "CH"))
+ self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_CH)
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_IN)
+ old_get_zone_soa = sqlite3_ds.get_zone_soa
+ def get_zone_soa(zone_name, db_file):
+ return None
+ sqlite3_ds.get_zone_soa = get_zone_soa
+ self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
+ self.assertEqual(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"],
+ ZONE_EXPIRED)
+ sqlite3_ds.get_zone_soa = old_get_zone_soa
+
def test_find_need_do_refresh_zone(self):
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info = {
- ("sd.cn.","IN"):{
+ ("example.net.","IN"):{
'last_refresh_time': time1,
'next_refresh_time': time1 + 7200,
- 'zone_soa_rdata': 'a.dns.cn. root.cnnic.cn. 2009073105 7200 3600 2419200 21600',
+ 'zone_soa_rdata': 'a.example.net. root.example.net. 2009073105 7200 3600 2419200 21600',
'zone_state': ZONE_OK},
- ("tw.cn.","CH"):{
+ ("example.org.","CH"):{
'last_refresh_time': time1 - 7200,
'next_refresh_time': time1,
'refresh_timeout': time1 + MAX_TRANSFER_TIMEOUT,
- 'zone_soa_rdata': 'a.dns.cn. root.cnnic.cn. 2009073112 7200 3600 2419200 21600',
+ 'zone_soa_rdata': 'a.example.org. root.example.org. 2009073112 7200 3600 2419200 21600',
'zone_state': ZONE_REFRESHING}
}
zone_need_refresh = self.zone_refresh._find_need_do_refresh_zone()
@@ -385,10 +406,10 @@ class TestZonemgrRefresh(unittest.TestCase):
def test_do_refresh(self):
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info = {
- ("sd.cn.", "IN"):{
+ ("example.net.", "IN"):{
'last_refresh_time': time1 - 7200,
'next_refresh_time': time1 - 1,
- 'zone_soa_rdata': 'a.dns.cn. root.cnnic.cn. 2009073105 7200 3600 2419200 21600',
+ 'zone_soa_rdata': 'a.example.net. root.example.net. 2009073105 7200 3600 2419200 21600',
'zone_state': ZONE_OK}
}
self.zone_refresh._do_refresh(ZONE_NAME_CLASS1_IN)
@@ -416,10 +437,10 @@ class TestZonemgrRefresh(unittest.TestCase):
"""
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info = {
- ("sd.cn.", "IN"):{
+ ("example.net.", "IN"):{
'last_refresh_time': time1 - 7200,
'next_refresh_time': time1 - 1,
- 'zone_soa_rdata': 'a.dns.cn. root.cnnic.cn. 2009073105 7200 3600 2419200 21600',
+ 'zone_soa_rdata': 'a.example.net. root.example.net. 2009073105 7200 3600 2419200 21600',
'zone_state': ZONE_OK}
}
self.zone_refresh._check_sock = self.zone_refresh._master_socket
@@ -432,6 +453,16 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue(zone_state == ZONE_REFRESHING)
def test_update_config_data(self):
+ # make sure it doesn't fail if we only provide secondary zones
+ config_data = {
+ "secondary_zones": [ { "name": "example.net.",
+ "class": "IN" } ]
+ }
+ self.zone_refresh.update_config_data(config_data)
+ self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh._zonemgr_refresh_info)
+
+ # update all values
config_data = {
"lowerbound_refresh" : 60,
"lowerbound_retry" : 30,
@@ -447,6 +478,55 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
self.assertEqual(0.75, self.zone_refresh._reload_jitter)
+ # make sure they are not reset when we only update one
+ config_data = {
+ "reload_jitter" : 0.35,
+ }
+ self.zone_refresh.update_config_data(config_data)
+ self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(30, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.35, self.zone_refresh._reload_jitter)
+
+ # and make sure we restore the previous config if something
+ # goes wrong
+ config_data = {
+ "lowerbound_refresh" : 61,
+ "lowerbound_retry" : 31,
+ "max_transfer_timeout" : 19801,
+ "refresh_jitter" : 0.21,
+ "reload_jitter" : 0.71,
+ "secondary_zones": [ { "name": "doesnotexist",
+ "class": "IN" } ]
+ }
+ self.zone_refresh.update_config_data(config_data)
+ name_class = ("doesnotexist.", "IN")
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ # The other configs should be updated successfully
+ self.assertEqual(61, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(31, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19801, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0.21, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.71, self.zone_refresh._reload_jitter)
+
+ # Make sure we accept 0 as a value
+ config_data = {
+ "lowerbound_refresh" : 60,
+ "lowerbound_retry" : 30,
+ "max_transfer_timeout" : 19800,
+ "refresh_jitter" : 0,
+ "reload_jitter" : 0.75,
+ "secondary_zones": []
+ }
+ self.zone_refresh.update_config_data(config_data)
+ self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(30, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.75, self.zone_refresh._reload_jitter)
+
def test_shutdown(self):
self.zone_refresh._check_sock = self.zone_refresh._master_socket
listener = self.zone_refresh.run_timer()
@@ -465,19 +545,20 @@ class TestZonemgrRefresh(unittest.TestCase):
# Put something in
config.set_zone_list_from_name_classes([ZONE_NAME_CLASS1_IN])
self.zone_refresh.update_config_data(config)
- self.assertTrue(("sd.cn.", "IN") in
+ self.assertTrue(("example.net.", "IN") in
self.zone_refresh._zonemgr_refresh_info)
# This one does not exist
config.set_zone_list_from_name_classes(["example.net", "CH"])
- self.assertRaises(ZonemgrException,
- self.zone_refresh.update_config_data, config)
- # So it should not affect the old ones
- self.assertTrue(("sd.cn.", "IN") in
+ self.zone_refresh.update_config_data(config)
+ self.assertFalse(("example.net.", "CH") in
+ self.zone_refresh._zonemgr_refresh_info)
+        # Simply skip loading soa for the zone; the other configs should be updated successfully
+ self.assertFalse(("example.net.", "IN") in
self.zone_refresh._zonemgr_refresh_info)
# Make sure it works even when we "accidentally" forget the final dot
- config.set_zone_list_from_name_classes([("sd.cn", "IN")])
+ config.set_zone_list_from_name_classes([("example.net", "IN")])
self.zone_refresh.update_config_data(config)
- self.assertTrue(("sd.cn.", "IN") in
+ self.assertTrue(("example.net.", "IN") in
self.zone_refresh._zonemgr_refresh_info)
def tearDown(self):
@@ -532,32 +613,35 @@ class TestZonemgr(unittest.TestCase):
self.assertEqual(self.zonemgr.config_handler(config_data1),
{"result": [0]})
self.assertEqual(config_data1, self.zonemgr._config_data)
- config_data2 = {"zone_name" : "sd.cn.", "port" : "53", "master" : "192.168.1.1"}
+ config_data2 = {"zone_name" : "example.net.", "port" : "53", "master" : "192.168.1.1"}
self.zonemgr.config_handler(config_data2)
self.assertEqual(config_data1, self.zonemgr._config_data)
# jitter should not be bigger than half of the original value
config_data3 = {"refresh_jitter" : 0.7}
self.zonemgr.config_handler(config_data3)
self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
- # The zone doesn't exist in database, it should be rejected
+        # The zone doesn't exist in the database; simply skip loading soa for it and log a warning
self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
config_data1)
config_data1["secondary_zones"] = [{"name": "nonexistent.example",
"class": "IN"}]
- self.assertNotEqual(self.zonemgr.config_handler(config_data1),
- {"result": [0]})
- # As it is rejected, the old value should be kept
- self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
+ self.assertEqual(self.zonemgr.config_handler(config_data1),
+ {"result": [0]})
+ # other configs should be updated successfully
+ name_class = ("nonexistent.example.", "IN")
+ self.assertTrue(self.zonemgr._zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ self.assertEqual(0.1, self.zonemgr._config_data.get("refresh_jitter"))
def test_get_db_file(self):
self.assertEqual("initdb.file", self.zonemgr.get_db_file())
def test_parse_cmd_params(self):
- params1 = {"zone_name" : "org.cn", "zone_class" : "CH", "master" : "127.0.0.1"}
- answer1 = (("org.cn", "CH"), "127.0.0.1")
+ params1 = {"zone_name" : "example.com.", "zone_class" : "CH", "master" : "127.0.0.1"}
+ answer1 = (ZONE_NAME_CLASS3_CH, "127.0.0.1")
self.assertEqual(answer1, self.zonemgr._parse_cmd_params(params1, ZONE_NOTIFY_COMMAND))
- params2 = {"zone_name" : "org.cn", "zone_class" : "CH"}
- answer2 = ("org.cn", "CH")
+ params2 = {"zone_name" : "example.com.", "zone_class" : "IN"}
+ answer2 = ZONE_NAME_CLASS3_IN
self.assertEqual(answer2, self.zonemgr._parse_cmd_params(params2, ZONE_XFRIN_SUCCESS_COMMAND))
self.assertRaises(ZonemgrException, self.zonemgr._parse_cmd_params, params2, ZONE_NOTIFY_COMMAND)
params1 = {"zone_class" : "CH"}
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index cc6d7b9..5c8d9b5 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -37,6 +37,16 @@ from isc.datasrc import sqlite3_ds
from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
import isc.util.process
+from isc.log_messages.zonemgr_messages import *
+
+# Initialize logging for called modules.
+isc.log.init("b10-zonemgr")
+logger = isc.log.Logger("zonemgr")
+
+# Constants for debug levels, to be removed when we have #1074.
+DBG_START_SHUT = 0
+DBG_ZONEMGR_COMMAND = 10
+DBG_ZONEMGR_BASIC = 40
isc.util.process.rename()
@@ -77,13 +87,6 @@ REFRESH_OFFSET = 3
RETRY_OFFSET = 4
EXPIRED_OFFSET = 5
-# verbose mode
-VERBOSE_MODE = False
-
-def log_msg(msg):
- if VERBOSE_MODE:
- sys.stdout.write("[b10-zonemgr] %s\n" % str(msg))
-
class ZonemgrException(Exception):
pass
@@ -93,7 +96,6 @@ class ZonemgrRefresh:
do zone refresh.
Zone timers can be started by calling run_timer(), and it
can be stopped by calling shutdown() in another thread.
-
"""
def __init__(self, cc, db_file, slave_socket, config_data):
@@ -101,6 +103,11 @@ class ZonemgrRefresh:
self._check_sock = slave_socket
self._db_file = db_file
self._zonemgr_refresh_info = {}
+ self._lowerbound_refresh = None
+ self._lowerbound_retry = None
+ self._max_transfer_timeout = None
+ self._refresh_jitter = None
+ self._reload_jitter = None
self.update_config_data(config_data)
self._running = False
@@ -135,7 +142,10 @@ class ZonemgrRefresh:
"""Set zone next refresh time after zone refresh fail.
now + retry - retry_jitter <= next_refresh_time <= now + retry
"""
- zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ if (self._get_zone_soa_rdata(zone_name_class) is not None):
+ zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ else:
+ zone_retry_time = 0.0
zone_retry_time = max(self._lowerbound_retry, zone_retry_time)
self._set_zone_timer(zone_name_class, zone_retry_time, self._refresh_jitter * zone_retry_time)
@@ -152,6 +162,7 @@ class ZonemgrRefresh:
def zone_refresh_success(self, zone_name_class):
"""Update zone info after zone refresh success"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_SUCCESS, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
self.zonemgr_reload_zone(zone_name_class)
@@ -162,10 +173,12 @@ class ZonemgrRefresh:
def zone_refresh_fail(self, zone_name_class):
"""Update zone info after zone refresh fail"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_FAIL, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
# Is zone expired?
- if (self._zone_is_expired(zone_name_class)):
+ if ((self._get_zone_soa_rdata(zone_name_class) is None) or
+ self._zone_is_expired(zone_name_class)):
self._set_zone_state(zone_name_class, ZONE_EXPIRED)
else:
self._set_zone_state(zone_name_class, ZONE_OK)
@@ -174,6 +187,7 @@ class ZonemgrRefresh:
def zone_handle_notify(self, zone_name_class, master):
"""Handle zone notify"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_NOTIFIED, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Notified zone (%s, %s) "
"doesn't belong to zonemgr" % zone_name_class)
self._set_zone_notifier_master(zone_name_class, master)
@@ -186,19 +200,23 @@ class ZonemgrRefresh:
def zonemgr_add_zone(self, zone_name_class):
""" Add a zone into zone manager."""
- log_msg("Loading zone (%s, %s)" % zone_name_class)
+
+ logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_LOAD_ZONE, zone_name_class[0], zone_name_class[1])
zone_info = {}
zone_soa = sqlite3_ds.get_zone_soa(str(zone_name_class[0]), self._db_file)
- if not zone_soa:
- raise ZonemgrException("[b10-zonemgr] zone (%s, %s) doesn't have soa." % zone_name_class)
- zone_info["zone_soa_rdata"] = zone_soa[7]
+ if zone_soa is None:
+ logger.warn(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
+ zone_info["zone_soa_rdata"] = None
+ zone_reload_time = 0.0
+ else:
+ zone_info["zone_soa_rdata"] = zone_soa[7]
+ zone_reload_time = float(zone_soa[7].split(" ")[RETRY_OFFSET])
zone_info["zone_state"] = ZONE_OK
zone_info["last_refresh_time"] = self._get_current_time()
self._zonemgr_refresh_info[zone_name_class] = zone_info
# Imposes some random jitters to avoid many zones need to do refresh at the same time.
- zone_reload_jitter = float(zone_soa[7].split(" ")[RETRY_OFFSET])
- zone_reload_jitter = max(self._lowerbound_retry, zone_reload_jitter)
- self._set_zone_timer(zone_name_class, zone_reload_jitter, self._reload_jitter * zone_reload_jitter)
+ zone_reload_time = max(self._lowerbound_retry, zone_reload_time)
+ self._set_zone_timer(zone_name_class, zone_reload_time, self._reload_jitter * zone_reload_time)
def _zone_is_expired(self, zone_name_class):
"""Judge whether a zone is expired or not."""
@@ -260,7 +278,7 @@ class ZonemgrRefresh:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error:
- sys.stderr.write("[b10-zonemgr] Failed to send to module %s, the session has been closed." % module_name)
+ logger.error(ZONEMGR_SEND_FAIL, module_name)
def _find_need_do_refresh_zone(self):
"""Find the first zone need do refresh, if no zone need
@@ -269,7 +287,8 @@ class ZonemgrRefresh:
zone_need_refresh = None
for zone_name_class in self._zonemgr_refresh_info.keys():
zone_state = self._get_zone_state(zone_name_class)
- # If hasn't received refresh response but are within refresh timeout, skip the zone
+ # If hasn't received refresh response but are within refresh
+ # timeout, skip the zone
if (ZONE_REFRESHING == zone_state and
(self._get_zone_refresh_timeout(zone_name_class) > self._get_current_time())):
continue
@@ -289,7 +308,7 @@ class ZonemgrRefresh:
def _do_refresh(self, zone_name_class):
"""Do zone refresh."""
- log_msg("Do refresh for zone (%s, %s)." % zone_name_class)
+ logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_REFRESH_ZONE, zone_name_class[0], zone_name_class[1])
self._set_zone_state(zone_name_class, ZONE_REFRESHING)
self._set_zone_refresh_timeout(zone_name_class, self._get_current_time() + self._max_transfer_timeout)
notify_master = self._get_zone_notifier_master(zone_name_class)
@@ -346,7 +365,7 @@ class ZonemgrRefresh:
if e.args[0] == errno.EINTR:
(rlist, wlist, xlist) = ([], [], [])
else:
- sys.stderr.write("[b10-zonemgr] Error with select(); %s\n" % e)
+ logger.error(ZONEMGR_SELECT_ERROR, e);
break
for fd in rlist:
@@ -360,12 +379,14 @@ class ZonemgrRefresh:
def run_timer(self, daemon=False):
"""
- Keep track of zone timers. Spawns and starts a thread. The thread object is returned.
+ Keep track of zone timers. Spawns and starts a thread. The thread object
+ is returned.
You can stop it by calling shutdown().
"""
# Small sanity check
if self._running:
+ logger.error(ZONEMGR_TIMER_THREAD_RUNNING)
raise RuntimeError("Trying to run the timers twice at the same time")
# Prepare the launch
@@ -390,6 +411,7 @@ class ZonemgrRefresh:
called from a different thread.
"""
if not self._running:
+ logger.error(ZONEMGR_NO_TIMER_THREAD)
raise RuntimeError("Trying to shutdown, but not running")
# Ask the thread to stop
@@ -404,37 +426,56 @@ class ZonemgrRefresh:
def update_config_data(self, new_config):
""" update ZonemgrRefresh config """
- backup = self._zonemgr_refresh_info.copy()
+ # Get a new value, but only if it is defined (commonly used below)
+ # We don't use "value or default", because if value would be
+ # 0, we would take default
+ def val_or_default(value, default):
+ if value is not None:
+ return value
+ else:
+ return default
+
+ self._lowerbound_refresh = val_or_default(
+ new_config.get('lowerbound_refresh'), self._lowerbound_refresh)
+
+ self._lowerbound_retry = val_or_default(
+ new_config.get('lowerbound_retry'), self._lowerbound_retry)
+
+ self._max_transfer_timeout = val_or_default(
+ new_config.get('max_transfer_timeout'), self._max_transfer_timeout)
+
+ self._refresh_jitter = val_or_default(
+ new_config.get('refresh_jitter'), self._refresh_jitter)
+
+ self._reload_jitter = val_or_default(
+ new_config.get('reload_jitter'), self._reload_jitter)
+
try:
required = {}
- # Add new zones
- for secondary_zone in new_config.get('secondary_zones'):
- name = secondary_zone['name']
- # Be tolerant to sclerotic users who forget the final dot
- if name[-1] != '.':
- name = name + '.'
- name_class = (name, secondary_zone['class'])
- required[name_class] = True
- # Add it only if it isn't there already
- if not name_class in self._zonemgr_refresh_info:
- self.zonemgr_add_zone(name_class)
- # Drop the zones that are no longer there
- # Do it in two phases, python doesn't like deleting while iterating
- to_drop = []
- for old_zone in self._zonemgr_refresh_info:
- if not old_zone in required:
- to_drop.append(old_zone)
- for drop in to_drop:
- del self._zonemgr_refresh_info[drop]
- # If we are not able to find it in database, restore the original
+ secondary_zones = new_config.get('secondary_zones')
+ if secondary_zones is not None:
+ # Add new zones
+ for secondary_zone in new_config.get('secondary_zones'):
+ name = secondary_zone['name']
+ # Be tolerant to sclerotic users who forget the final dot
+ if name[-1] != '.':
+ name = name + '.'
+ name_class = (name, secondary_zone['class'])
+ required[name_class] = True
+ # Add it only if it isn't there already
+ if not name_class in self._zonemgr_refresh_info:
+                    # If we are not able to find it in database, log a warning
+ self.zonemgr_add_zone(name_class)
+ # Drop the zones that are no longer there
+ # Do it in two phases, python doesn't like deleting while iterating
+ to_drop = []
+ for old_zone in self._zonemgr_refresh_info:
+ if not old_zone in required:
+ to_drop.append(old_zone)
+ for drop in to_drop:
+ del self._zonemgr_refresh_info[drop]
except:
- self._zonemgr_refresh_info = backup
raise
- self._lowerbound_refresh = new_config.get('lowerbound_refresh')
- self._lowerbound_retry = new_config.get('lowerbound_retry')
- self._max_transfer_timeout = new_config.get('max_transfer_timeout')
- self._refresh_jitter = new_config.get('refresh_jitter')
- self._reload_jitter = new_config.get('reload_jitter')
class Zonemgr:
"""Zone manager class."""
@@ -474,8 +515,8 @@ class Zonemgr:
return db_file
def shutdown(self):
- """Shutdown the zonemgr process. the thread which is keeping track of zone
- timers should be terminated.
+ """Shutdown the zonemgr process. The thread which is keeping track of
+ zone timers should be terminated.
"""
self._zone_refresh.shutdown()
@@ -515,17 +556,17 @@ class Zonemgr:
# jitter should not be bigger than half of the original value
if config_data.get('refresh_jitter') > 0.5:
config_data['refresh_jitter'] = 0.5
- log_msg("[b10-zonemgr] refresh_jitter is too big, its value will "
- "be set to 0.5")
-
+ logger.warn(ZONEMGR_JITTER_TOO_BIG)
def _parse_cmd_params(self, args, command):
zone_name = args.get("zone_name")
if not zone_name:
+ logger.error(ZONEMGR_NO_ZONE_NAME)
raise ZonemgrException("zone name should be provided")
zone_class = args.get("zone_class")
if not zone_class:
+ logger.error(ZONEMGR_NO_ZONE_CLASS)
raise ZonemgrException("zone class should be provided")
if (command != ZONE_NOTIFY_COMMAND):
@@ -533,6 +574,7 @@ class Zonemgr:
master_str = args.get("master")
if not master_str:
+ logger.error(ZONEMGR_NO_MASTER_ADDRESS)
raise ZonemgrException("master address should be provided")
return ((zone_name, zone_class), master_str)
@@ -540,15 +582,16 @@ class Zonemgr:
def command_handler(self, command, args):
"""Handle command receivd from command channel.
- ZONE_NOTIFY_COMMAND is issued by Auth process; ZONE_XFRIN_SUCCESS_COMMAND
- and ZONE_XFRIN_FAILED_COMMAND are issued by Xfrin process; shutdown is issued
- by a user or Boss process. """
+ ZONE_NOTIFY_COMMAND is issued by Auth process;
+ ZONE_XFRIN_SUCCESS_COMMAND and ZONE_XFRIN_FAILED_COMMAND are issued by
+ Xfrin process;
+ shutdown is issued by a user or Boss process. """
answer = create_answer(0)
if command == ZONE_NOTIFY_COMMAND:
""" Handle Auth notify command"""
# master is the source sender of the notify message.
zone_name_class, master = self._parse_cmd_params(args, command)
- log_msg("Received notify command for zone (%s, %s)." % zone_name_class)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_NOTIFY, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_handle_notify(zone_name_class, master)
# Send notification to zonemgr timer thread
@@ -557,6 +600,7 @@ class Zonemgr:
elif command == ZONE_XFRIN_SUCCESS_COMMAND:
""" Handle xfrin success command"""
zone_name_class = self._parse_cmd_params(args, command)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_SUCCESS, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_refresh_success(zone_name_class)
self._master_socket.send(b" ")# make self._slave_socket readble
@@ -564,14 +608,17 @@ class Zonemgr:
elif command == ZONE_XFRIN_FAILED_COMMAND:
""" Handle xfrin fail command"""
zone_name_class = self._parse_cmd_params(args, command)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_FAILED, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_refresh_fail(zone_name_class)
self._master_socket.send(b" ")# make self._slave_socket readble
elif command == "shutdown":
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_SHUTDOWN)
self.shutdown()
else:
+ logger.warn(ZONEMGR_RECEIVE_UNKNOWN, str(command))
answer = create_answer(1, "Unknown command:" + str(command))
return answer
@@ -598,25 +645,29 @@ def set_cmd_options(parser):
if '__main__' == __name__:
try:
+ logger.debug(DBG_START_SHUT, ZONEMGR_STARTING)
parser = OptionParser()
set_cmd_options(parser)
(options, args) = parser.parse_args()
- VERBOSE_MODE = options.verbose
+ if options.verbose:
+ logger.set_severity("DEBUG", 99)
set_signal_handler()
zonemgrd = Zonemgr()
zonemgrd.run()
except KeyboardInterrupt:
- sys.stderr.write("[b10-zonemgr] exit zonemgr process\n")
+ logger.info(ZONEMGR_KEYBOARD_INTERRUPT)
+
except isc.cc.session.SessionError as e:
- sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
- "is the command channel daemon running?\n")
+ logger.error(ZONEMGR_SESSION_ERROR)
+
except isc.cc.session.SessionTimeout as e:
- sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
- "is the configuration manager running?\n")
+ logger.error(ZONEMGR_SESSION_TIMEOUT)
+
except isc.config.ModuleCCSessionError as e:
- sys.stderr.write("[b10-zonemgr] exit zonemgr process: %s\n" % str(e))
+ logger.error(ZONEMGR_CCSESSION_ERROR, str(e))
if zonemgrd and zonemgrd.running:
zonemgrd.shutdown()
+ logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
new file mode 100644
index 0000000..8abec5d
--- /dev/null
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -0,0 +1,145 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the zonemgr messages python module.
+
+% ZONEMGR_CCSESSION_ERROR command channel session error: %1
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+
+% ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+
+% ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+
+% ZONEMGR_LOAD_ZONE loading zone %1 (class %2)
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+
+% ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+
+% ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+
+% ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+
+% ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+
+% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+
+% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+
+% ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+
+% ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+
+% ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+
+% ZONEMGR_SELECT_ERROR error with select(): %1
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+
+% ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+
+% ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SHUTDOWN zone manager has shut down
+A debug message, output when the zone manager has shut down completely.
+
+% ZONEMGR_STARTING zone manager starting
+A debug message output when the zone manager starts up.
+
+% ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
diff --git a/win32build/VS2010/b10-host/b10-host.vcxproj b/win32build/VS2010/b10-host/b10-host.vcxproj
new file mode 100755
index 0000000..2982298
--- /dev/null
+++ b/win32build/VS2010/b10-host/b10-host.vcxproj
@@ -0,0 +1,98 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{7BA6A1D1-3C42-40B9-9336-342982DD408B}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>b10host</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <OutDir>$(Configuration)\</OutDir>
+ <TargetName>b10-host</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <OutDir>$(Configuration)\</OutDir>
+ <TargetName>b10-host</TargetName>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level4</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\lib\dns;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <CompileAs>CompileAsCpp</CompileAs>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <OutputFile>$(OutDir)b10-host.exe</OutputFile>
+ <AdditionalLibraryDirectories>..\libexceptions\Debug;..\libutil\Debug;..\libcryptolink\Debug;..\libdns++\Debug;..\..\..\..\botan\md10\Debug;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libcryptolink.lib;libdns++.lib;botan.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\lib\dns;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <CompileAs>CompileAsCpp</CompileAs>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <OutputFile>$(OutDir)b10-host.exe</OutputFile>
+ <AdditionalLibraryDirectories>..\libexceptions\Release;..\libutil\Release;..\libcryptolink\Release;..\libdns++\Release;..\..\..\..\botan\md10\Release;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libcryptolink.lib;libdns++.lib;botan.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\src\bin\host\host.cc" />
+ <ClCompile Include="..\..\getopt.cc" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\getopt.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-host/b10-host.vcxproj.filters b/win32build/VS2010/b10-host/b10-host.vcxproj.filters
new file mode 100755
index 0000000..895f739
--- /dev/null
+++ b/win32build/VS2010/b10-host/b10-host.vcxproj.filters
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ <Filter Include="Resource Files">
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\src\bin\host\host.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\getopt.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\getopt.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-host/b10-host.vcxproj.user b/win32build/VS2010/b10-host/b10-host.vcxproj.user
new file mode 100755
index 0000000..695b5c7
--- /dev/null
+++ b/win32build/VS2010/b10-host/b10-host.vcxproj.user
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-resolver/b10-resolver.vcxproj b/win32build/VS2010/b10-resolver/b10-resolver.vcxproj
new file mode 100755
index 0000000..6dfc21d
--- /dev/null
+++ b/win32build/VS2010/b10-resolver/b10-resolver.vcxproj
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{48B8644A-79FB-4D03-A981-1CA21FEE3286}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>b10resolver</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <OutDir>$(Configuration)\</OutDir>
+ <TargetName>b10-resolver</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <OutDir>$(Configuration)\</OutDir>
+ <TargetName>b10-resolver</TargetName>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level4</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\lib\dns;..\..\..\src\bin;..\..\..\src\bin\resolver;..\..\..\ext\asio;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <CompileAs>CompileAsCpp</CompileAs>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <OutputFile>$(OutDir)b10-resolver.exe</OutputFile>
+ <AdditionalLibraryDirectories>..\libexceptions\Debug;..\libutil\Debug;..\liblog\Debug;..\libcryptolink\Debug;..\libdns++\Debug;..\libcc\Debug;..\libcfgclient\Debug;..\libacl\Debug;..\libdnsacl\Debug;..\libasiolink\Debug;..\libasiodns\Debug;..\libnsas\Debug;..\libcache\Debug;..\libresolve\Debug;..\libserver_common\Debug;..\..\..\..\botan\md10\Debug;..\..\..\..\log4cplus\md10\Debug;%BOOST%\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libcc.lib;libcfgclient.lib;libacl.lib;libdnsacl.lib;libasiolink.lib;libasiodns.lib;libnsas.lib;libcache.lib;libresolve.lib;libserver_common.lib;botan.lib;log4cplusSD.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ <PreBuildEvent>
+ <Command>cd ..\..\..\src\bin\resolver
+..\..\..\win32build\VS2010\liblog_compiler\Release\message.exe resolver_messages.mes
+copy resolver.spec.pre.win32 resolver.spec
+copy spec_config.h.pre.win32 spec_config.h
+</Command>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\lib\dns;..\..\..\src\bin;..\..\..\src\bin\resolver;..\..\..\ext\asio;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <CompileAs>CompileAsCpp</CompileAs>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <OutputFile>$(OutDir)b10-resolver.exe</OutputFile>
+ <AdditionalLibraryDirectories>..\libexceptions\Release;..\libutil\Release;..\liblog\Release;..\libcryptolink\Release;..\libdns++\Release;..\libcc\Release;..\libcfgclient\Release;..\libacl\Release;..\libdnsacl\Release;..\libasiolink\Release;..\libasiodns\Release;..\libnsas\Release;..\libcache\Release;..\libresolve\Release;..\libserver_common\Release;..\..\..\..\botan\md10\Release;..\..\..\..\log4cplus\md10\Release;%BOOST%\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libcc.lib;libcfgclient.lib;libacl.lib;libdnsacl.lib;libasiolink.lib;libasiodns.lib;libnsas.lib;libcache.lib;libresolve.lib;libserver_common.lib;botan.lib;log4cplusS.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ <PreBuildEvent>
+ <Command>cd ..\..\..\src\bin\resolver
+..\..\..\win32build\VS2010\liblog_compiler\Release\message.exe resolver_messages.mes
+copy resolver.spec.pre.win32 resolver.spec
+copy spec_config.h.pre.win32 spec_config.h
+</Command>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_log.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_messages.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\response_scrubber.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\spec_config.h" />
+ <ClInclude Include="..\..\getopt.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\src\bin\resolver\main.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_log.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_messages.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\response_scrubber.cc" />
+ <ClCompile Include="..\..\getopt.cc" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-resolver/b10-resolver.vcxproj.filters b/win32build/VS2010/b10-resolver/b10-resolver.vcxproj.filters
new file mode 100755
index 0000000..6ba67d9
--- /dev/null
+++ b/win32build/VS2010/b10-resolver/b10-resolver.vcxproj.filters
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ <Filter Include="Resource Files">
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_log.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\response_scrubber.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_messages.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\spec_config.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\getopt.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\src\bin\resolver\main.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_log.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\response_scrubber.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_messages.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\getopt.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-resolver/b10-resolver.vcxproj.user b/win32build/VS2010/b10-resolver/b10-resolver.vcxproj.user
new file mode 100755
index 0000000..695b5c7
--- /dev/null
+++ b/win32build/VS2010/b10-resolver/b10-resolver.vcxproj.user
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj b/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj
new file mode 100755
index 0000000..421bef8
--- /dev/null
+++ b/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{1ADF152B-0153-424D-B195-5821B30128D6}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>b10resolver_tests</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <OutDir>$(Configuration)\</OutDir>
+ <TargetName>run_unittests</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <OutDir>$(Configuration)\</OutDir>
+ <TargetName>run_unittests</TargetName>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level4</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;TEST_DATA_DIR="c:/cygwin/home/fdupont/bind10.trac826/src/lib/testutils/testdata/";TEST_DATA_BUILDDIR="c:/cygwin/home/fdupont/bind10.trac826/src/lib/testutils/testdata/";_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\bin;..\..\..\ext\asio;..\..\..\..\gtest\include;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <CompileAs>CompileAsCpp</CompileAs>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <OutputFile>$(OutDir)run_unittests.exe</OutputFile>
+ <AdditionalLibraryDirectories>..\libexceptions\Debug;..\libutil\Debug;..\libutil_unittests\Debug;..\liblog\Debug;..\libcryptolink\Debug;..\libdns++\Debug;..\libcc\Debug;..\libcfgclient\Debug;..\libacl\Debug;..\libdnsacl\Debug;..\libasiolink\Debug;..\libasiodns\Debug;..\libnsas\Debug;..\libcache\Debug;..\libresolve\Debug;..\libtestutils\Debug;..\libserver_common\Debug;..\..\..\..\botan\md10\Debug;..\..\..\..\log4cplus\md10\Debug;..\..\..\..\gtest\md10\Debug;%BOOST%/lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libcc.lib;libcfgclient.lib;libacl.lib;libdnsacl.lib;libasiolink.lib;libasiodns.lib;libnsas.lib;libcache.lib;libresolve.lib;libtestutils.lib;libserver_common.lib;botan.lib;log4cplusSD.lib;gtestd.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;TEST_DATA_DIR="c:/cygwin/home/fdupont/bind10.trac826/src/lib/testutils/testdata/";TEST_DATA_BUILDDIR="c:/cygwin/home/fdupont/bind10.trac826/src/lib/testutils/testdata/";NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..;..\..\..\src\lib;..\..\..\src\bin;..\..\..\ext\asio;..\..\..\..\gtest\include;%BOOST%;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <CompileAs>CompileAsCpp</CompileAs>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <OutputFile>$(OutDir)run_unittests.exe</OutputFile>
+ <AdditionalLibraryDirectories>..\libexceptions\Release;..\libutil\Release;..\libutil_unittests\Release;..\liblog\Release;..\libcryptolink\Release;..\libdns++\Release;..\libcc\Release;..\libcfgclient\Release;..\libacl\Release;..\libdnsacl\Release;..\libasiolink\Release;..\libasiodns\Release;..\libnsas\Release;..\libcache\Release;..\libresolve\Release;..\libtestutils\Release;..\libserver_common\Release;..\..\..\..\botan\md10\Release;..\..\..\..\log4cplus\md10\Release;..\..\..\..\gtest\md10\Release;%BOOST%/lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>libexceptions.lib;libutil.lib;libutil_unittests.lib;liblog.lib;libcryptolink.lib;libdns++.lib;libcc.lib;libcfgclient.lib;libacl.lib;libdnsacl.lib;libasiolink.lib;libasiodns.lib;libnsas.lib;libcache.lib;libresolve.lib;libtestutils.lib;libserver_common.lib;botan.lib;log4cplusS.lib;gtest.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_log.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_messages.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\response_scrubber.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\resolver_config_unittest.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\resolver_unittest.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\response_scrubber_unittest.cc" />
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\run_unittests.cc" />
+ <ClCompile Include="..\..\..\src\lib\dns\tests\unittest_util.cc" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_log.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_messages.h" />
+ <ClInclude Include="..\..\..\src\bin\resolver\response_scrubber.h" />
+ <ClInclude Include="..\..\..\src\lib\dns\tests\unittest_util.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.filters b/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.filters
new file mode 100755
index 0000000..157f58a
--- /dev/null
+++ b/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.filters
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ <Filter Include="Resource Files">
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\resolver_config_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\resolver_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\response_scrubber_unittest.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\tests\run_unittests.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_log.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\resolver_messages.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\bin\resolver\response_scrubber.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\src\lib\dns\tests\unittest_util.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_log.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\resolver_messages.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\bin\resolver\response_scrubber.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\src\lib\dns\tests\unittest_util.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.user b/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.user
new file mode 100755
index 0000000..695b5c7
--- /dev/null
+++ b/win32build/VS2010/b10-resolver_tests/b10-resolver_tests.vcxproj.user
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+</Project>
\ No newline at end of file
diff --git a/win32build/VS2010/bind10.sln b/win32build/VS2010/bind10.sln
index 21a9ad5..30c9d6f 100755
--- a/win32build/VS2010/bind10.sln
+++ b/win32build/VS2010/bind10.sln
@@ -13,11 +13,13 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "BINDInstall", "BINDInstall\
{831EDB24-3B0A-44AE-A192-6F3DEA1A9BA6} = {831EDB24-3B0A-44AE-A192-6F3DEA1A9BA6}
{33927325-C9B5-4FE6-B69F-318433AFF4BD} = {33927325-C9B5-4FE6-B69F-318433AFF4BD}
{D781E925-FE57-4C75-9E26-BBC102B6A24A} = {D781E925-FE57-4C75-9E26-BBC102B6A24A}
+ {1ADF152B-0153-424D-B195-5821B30128D6} = {1ADF152B-0153-424D-B195-5821B30128D6}
{7D04222B-643C-446C-A2B8-93AF74A86246} = {7D04222B-643C-446C-A2B8-93AF74A86246}
{DCF4ED2E-FFD1-4432-AFEF-8D6EC96B79A2} = {DCF4ED2E-FFD1-4432-AFEF-8D6EC96B79A2}
{D728B037-F63F-4CB5-B840-9AC54BDFBED5} = {D728B037-F63F-4CB5-B840-9AC54BDFBED5}
{6D55503E-0B43-4273-B6A5-4CEB39E114AC} = {6D55503E-0B43-4273-B6A5-4CEB39E114AC}
{13215E3E-E75D-463D-A0EF-93A1C9A20896} = {13215E3E-E75D-463D-A0EF-93A1C9A20896}
+ {48B8644A-79FB-4D03-A981-1CA21FEE3286} = {48B8644A-79FB-4D03-A981-1CA21FEE3286}
{635B804D-1B52-433E-9ECD-84F507FDB1F1} = {635B804D-1B52-433E-9ECD-84F507FDB1F1}
{67046450-CCEA-4CAC-A05B-17516F3FB540} = {67046450-CCEA-4CAC-A05B-17516F3FB540}
{55BCB364-62B0-4F93-8B88-38F3349B22C8} = {55BCB364-62B0-4F93-8B88-38F3349B22C8}
@@ -45,6 +47,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "BINDInstall", "BINDInstall\
{2E64F6CC-3AD9-4DA7-8E05-ABBB83F9AFC4} = {2E64F6CC-3AD9-4DA7-8E05-ABBB83F9AFC4}
{F27BC0D0-A334-4DC0-9DC9-880D5DA74524} = {F27BC0D0-A334-4DC0-9DC9-880D5DA74524}
{AC4806D1-C2CC-444B-8F0D-209851A969D2} = {AC4806D1-C2CC-444B-8F0D-209851A969D2}
+ {7BA6A1D1-3C42-40B9-9336-342982DD408B} = {7BA6A1D1-3C42-40B9-9336-342982DD408B}
{65B0B6D2-94CE-4A21-85E7-A047C79044F9} = {65B0B6D2-94CE-4A21-85E7-A047C79044F9}
{F6E728D3-A0B2-40F6-9B91-7D4474D778F3} = {F6E728D3-A0B2-40F6-9B91-7D4474D778F3}
{564B0ADE-76A4-4833-9610-8DEEA6A15423} = {564B0ADE-76A4-4833-9610-8DEEA6A15423}
@@ -448,6 +451,56 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pyddnsacl", "pyddnsacl\pydd
{CC29C1F9-A77B-476C-803E-8830F8312571} = {CC29C1F9-A77B-476C-803E-8830F8312571}
EndProjectSection
EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "b10-host", "b10-host\b10-host.vcxproj", "{7BA6A1D1-3C42-40B9-9336-342982DD408B}"
+ ProjectSection(ProjectDependencies) = postProject
+ {7D04222B-643C-446C-A2B8-93AF74A86246} = {7D04222B-643C-446C-A2B8-93AF74A86246}
+ {13215E3E-E75D-463D-A0EF-93A1C9A20896} = {13215E3E-E75D-463D-A0EF-93A1C9A20896}
+ {8F120666-1A69-4506-8546-0F665E80FFB7} = {8F120666-1A69-4506-8546-0F665E80FFB7}
+ {F6E728D3-A0B2-40F6-9B91-7D4474D778F3} = {F6E728D3-A0B2-40F6-9B91-7D4474D778F3}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "b10-resolver", "b10-resolver\b10-resolver.vcxproj", "{48B8644A-79FB-4D03-A981-1CA21FEE3286}"
+ ProjectSection(ProjectDependencies) = postProject
+ {69048307-9655-4AAA-B07E-B67345C1DEF9} = {69048307-9655-4AAA-B07E-B67345C1DEF9}
+ {3FFD260A-C606-49D1-A34F-74B78D8DC76F} = {3FFD260A-C606-49D1-A34F-74B78D8DC76F}
+ {7D04222B-643C-446C-A2B8-93AF74A86246} = {7D04222B-643C-446C-A2B8-93AF74A86246}
+ {13215E3E-E75D-463D-A0EF-93A1C9A20896} = {13215E3E-E75D-463D-A0EF-93A1C9A20896}
+ {635B804D-1B52-433E-9ECD-84F507FDB1F1} = {635B804D-1B52-433E-9ECD-84F507FDB1F1}
+ {67046450-CCEA-4CAC-A05B-17516F3FB540} = {67046450-CCEA-4CAC-A05B-17516F3FB540}
+ {8F120666-1A69-4506-8546-0F665E80FFB7} = {8F120666-1A69-4506-8546-0F665E80FFB7}
+ {F8616086-9CE9-4F32-BC97-8494EADAEC6F} = {F8616086-9CE9-4F32-BC97-8494EADAEC6F}
+ {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1} = {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1}
+ {813BA1C9-8CD8-4B06-B1C0-FDAB576AC4B6} = {813BA1C9-8CD8-4B06-B1C0-FDAB576AC4B6}
+ {2E64F6CC-3AD9-4DA7-8E05-ABBB83F9AFC4} = {2E64F6CC-3AD9-4DA7-8E05-ABBB83F9AFC4}
+ {F6E728D3-A0B2-40F6-9B91-7D4474D778F3} = {F6E728D3-A0B2-40F6-9B91-7D4474D778F3}
+ {7EB244E7-D381-4CF4-A2D4-739B81F77588} = {7EB244E7-D381-4CF4-A2D4-739B81F77588}
+ {66C9A5EC-514B-4BDC-AC74-ED4CB465CAAF} = {66C9A5EC-514B-4BDC-AC74-ED4CB465CAAF}
+ {CC29C1F9-A77B-476C-803E-8830F8312571} = {CC29C1F9-A77B-476C-803E-8830F8312571}
+ {AEF3DFFE-B566-4E6A-B299-B59B81022C06} = {AEF3DFFE-B566-4E6A-B299-B59B81022C06}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "b10-resolver_tests", "b10-resolver_tests\b10-resolver_tests.vcxproj", "{1ADF152B-0153-424D-B195-5821B30128D6}"
+ ProjectSection(ProjectDependencies) = postProject
+ {69048307-9655-4AAA-B07E-B67345C1DEF9} = {69048307-9655-4AAA-B07E-B67345C1DEF9}
+ {3FFD260A-C606-49D1-A34F-74B78D8DC76F} = {3FFD260A-C606-49D1-A34F-74B78D8DC76F}
+ {7D04222B-643C-446C-A2B8-93AF74A86246} = {7D04222B-643C-446C-A2B8-93AF74A86246}
+ {13215E3E-E75D-463D-A0EF-93A1C9A20896} = {13215E3E-E75D-463D-A0EF-93A1C9A20896}
+ {48B8644A-79FB-4D03-A981-1CA21FEE3286} = {48B8644A-79FB-4D03-A981-1CA21FEE3286}
+ {635B804D-1B52-433E-9ECD-84F507FDB1F1} = {635B804D-1B52-433E-9ECD-84F507FDB1F1}
+ {67046450-CCEA-4CAC-A05B-17516F3FB540} = {67046450-CCEA-4CAC-A05B-17516F3FB540}
+ {55BCB364-62B0-4F93-8B88-38F3349B22C8} = {55BCB364-62B0-4F93-8B88-38F3349B22C8}
+ {8F120666-1A69-4506-8546-0F665E80FFB7} = {8F120666-1A69-4506-8546-0F665E80FFB7}
+ {F8616086-9CE9-4F32-BC97-8494EADAEC6F} = {F8616086-9CE9-4F32-BC97-8494EADAEC6F}
+ {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1} = {EB54F7B8-FAEF-4348-989C-D4E6B42CEFB1}
+ {813BA1C9-8CD8-4B06-B1C0-FDAB576AC4B6} = {813BA1C9-8CD8-4B06-B1C0-FDAB576AC4B6}
+ {F6E728D3-A0B2-40F6-9B91-7D4474D778F3} = {F6E728D3-A0B2-40F6-9B91-7D4474D778F3}
+ {7EB244E7-D381-4CF4-A2D4-739B81F77588} = {7EB244E7-D381-4CF4-A2D4-739B81F77588}
+ {66C9A5EC-514B-4BDC-AC74-ED4CB465CAAF} = {66C9A5EC-514B-4BDC-AC74-ED4CB465CAAF}
+ {CC29C1F9-A77B-476C-803E-8830F8312571} = {CC29C1F9-A77B-476C-803E-8830F8312571}
+ {2844FDFB-A0A1-4FA4-A654-15D69CC717DD} = {2844FDFB-A0A1-4FA4-A654-15D69CC717DD}
+ {AEF3DFFE-B566-4E6A-B299-B59B81022C06} = {AEF3DFFE-B566-4E6A-B299-B59B81022C06}
+ EndProjectSection
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
@@ -678,6 +731,18 @@ Global
{47052F67-D9A0-433E-A074-5E50247B5F48}.Debug|Win32.Build.0 = Debug|Win32
{47052F67-D9A0-433E-A074-5E50247B5F48}.Release|Win32.ActiveCfg = Release|Win32
{47052F67-D9A0-433E-A074-5E50247B5F48}.Release|Win32.Build.0 = Release|Win32
+ {7BA6A1D1-3C42-40B9-9336-342982DD408B}.Debug|Win32.ActiveCfg = Debug|Win32
+ {7BA6A1D1-3C42-40B9-9336-342982DD408B}.Debug|Win32.Build.0 = Debug|Win32
+ {7BA6A1D1-3C42-40B9-9336-342982DD408B}.Release|Win32.ActiveCfg = Release|Win32
+ {7BA6A1D1-3C42-40B9-9336-342982DD408B}.Release|Win32.Build.0 = Release|Win32
+ {48B8644A-79FB-4D03-A981-1CA21FEE3286}.Debug|Win32.ActiveCfg = Debug|Win32
+ {48B8644A-79FB-4D03-A981-1CA21FEE3286}.Debug|Win32.Build.0 = Debug|Win32
+ {48B8644A-79FB-4D03-A981-1CA21FEE3286}.Release|Win32.ActiveCfg = Release|Win32
+ {48B8644A-79FB-4D03-A981-1CA21FEE3286}.Release|Win32.Build.0 = Release|Win32
+ {1ADF152B-0153-424D-B195-5821B30128D6}.Debug|Win32.ActiveCfg = Debug|Win32
+ {1ADF152B-0153-424D-B195-5821B30128D6}.Debug|Win32.Build.0 = Debug|Win32
+ {1ADF152B-0153-424D-B195-5821B30128D6}.Release|Win32.ActiveCfg = Release|Win32
+ {1ADF152B-0153-424D-B195-5821B30128D6}.Release|Win32.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
More information about the bind10-changes
mailing list