BIND 10 trac1061, updated. 63f4617b5ab99d75e98e40760ff68bb1615a84e6 [trac1061] Doxygen comments for database classes
BIND 10 source code commits
bind10-changes at lists.isc.org
Mon Aug 1 08:48:49 UTC 2011
The branch, trac1061 has been updated
discards 29f8206c55da69ea889ee4bd012f28c28ba90fe4 (commit)
discards 03c00dfe4e92b1ff0d434da7218ef16a1f3a5202 (commit)
via 63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
via 579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)
via 9cc8edcca2ab13145a954b44101f7058142d4ac1 (commit)
via dd7e5d47df1e9af687cdc87c2d2595893eefec12 (commit)
via 8907c6a5c71816483099683e0ddcaf11cf3a7912 (commit)
via 06aeefc4787c82db7f5443651f099c5af47bd4d6 (commit)
via ced9ddecf6b8f7777125b8d4d2ef1b24ccad34cd (commit)
via c18502d5a89af081b1cd4c4b1c112f9458056124 (commit)
via ee4916a2db7ff1217c0af65f03220583b80b4568 (commit)
via 589965360a98152e8c783e4736080e06a895feb0 (commit)
via cb86d16418ced44b148726104c5c8f9d36a3be49 (commit)
via f279d996354eded4defa219a393efa362e157406 (commit)
via 69336de84b2ae1b5b6a59fa8d817daa1108cea27 (commit)
via 12186e267fb75a77027dc046f78db6ace99b8571 (commit)
via c62810c526d75363ed4d668bbdb6b21a5a294a7b (commit)
via 0710846d8d7a38079b9570aeec9abfb94341af79 (commit)
via 9517f61cb8ad4f8074b5e6e33c663ca9ed581908 (commit)
via 3da7e8747dcea9b45c8bc4c17b946be7d5ff9576 (commit)
via 900a3c5828be90bfce2a7b8e2e6edc0d4509df6a (commit)
via d9e757fb15b711464cfc8ba344f2563f3e2b9195 (commit)
via 517c31a58af1f7b97f308e77caeb8cbe9ef99cf1 (commit)
via 4c485d0b112721d3a2b2939ab61db14b7608c98c (commit)
via be388eb699a8517595ea921082b5ded2d1450dcc (commit)
via 0711c996f017cabe220dd291500bb1b202f21e1f (commit)
via 9b2e89cabb6191db86f88ee717f7abc4171fa979 (commit)
via 566d284cd664a78255f5fbc8881ee8996f835960 (commit)
via 8d8c3bc259f8b549a2fbace562afb0984cd427ba (commit)
via af698f41e199e4942d818accb0cc0ad7589785e8 (commit)
via 6300d968db6e857e199cf8e4701988bf2f9136a2 (commit)
via 49d5415d994ab0807daeaacf5e30f9186ca72ff5 (commit)
via 6a204908cb3f11ba7635d5e0a97a196856fb5748 (commit)
via 489f9a3bf2078969f746a47a49fdc17d94f898d3 (commit)
via 7b55eb02488353672fad7160148a40e581cb5c80 (commit)
via 67f6e4baa87b5555f3bc13919707a3f3180d57f4 (commit)
via c0a78a899ad3d96bcfe15715e957eebdb71ecca4 (commit)
via 6ba745463f9f54496a2f9c2b1a407ab40844bbd4 (commit)
via 18d0a74b6464ffbe036c41e706d3130a69a38313 (commit)
via ae1cf18d06bfc92ba1803ad8bb7c90be844f491e (commit)
via 26e04c45efa440353cd75365c499fc06ba1eb4ea (commit)
via 42017c858f5e08f1544620342404904c36d12625 (commit)
via fafb108c231295b40b7b0d0ea86caff5031a0c30 (commit)
via 136adbdab133d19bf900036b3786d5f709ab2082 (commit)
via e108ea6f210bf93250ad4ea23ac3708e1478946e (commit)
via 1f26ac530c0ca072ff0de69093d38c95b9d3c80a (commit)
via 71fb105407d496134f0cfcbea73eaea9991dbcf5 (commit)
via 834d48869745039bbd874d76bcafb4ac6ce7a4e8 (commit)
via 7e0ef7c21ad41f0e3047059fef61ddbefe143444 (commit)
via 7cc84b1bfe00402ea12749c63c7e4d8cef5b2431 (commit)
via 2cd7eb5d2c64c6a54350e6399f07fd4826933bff (commit)
via 4f17845a927e33ad9655c3f711177e376bc10e44 (commit)
via 84a16612dd45bcaca490715039b1bec235e0dfef (commit)
via d4dce83017319569f35e617dae47af9041166239 (commit)
via 3b30727c4ae0b4febedb9795752352bf5154730a (commit)
via bf635ee41af43f357b285ab97f04f72b37e8fb64 (commit)
This update added new revisions after undoing existing revisions. That is
to say, the old revision is not a strict subset of the new revision. This
situation occurs when you --force push a change and generate a repository
containing something like this:
* -- * -- B -- O -- O -- O (29f8206c55da69ea889ee4bd012f28c28ba90fe4)
\
N -- N -- N (63f4617b5ab99d75e98e40760ff68bb1615a84e6)
When this happens we assume that you've already had alert emails for all
of the O revisions, and so we here report only the revisions in the N
branch from the common base, B.
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 63f4617b5ab99d75e98e40760ff68bb1615a84e6
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Jul 28 15:42:45 2011 +0200
[trac1061] Doxygen comments for database classes
commit 579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Jul 28 13:47:17 2011 +0200
[trac1061] Interface of the database connection and client
It will look something like this, hopefully. Let's see if it works.
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 10 +
configure.ac | 5 +-
ext/asio/asio/impl/error_code.ipp | 3 +
src/bin/auth/command.cc | 6 +-
src/bin/bind10/Makefile.am | 10 +-
src/bin/{stats/tests/http => bind10}/__init__.py | 0
src/bin/bind10/bind10_messages.mes | 65 ++++-
src/bin/bind10/{bind10.py.in => bind10_src.py.in} | 34 ++-
src/bin/bind10/run_bind10.sh.in | 4 +-
src/bin/bind10/sockcreator.py | 226 ++++++++++++++
src/bin/bind10/tests/Makefile.am | 7 +-
src/bin/bind10/tests/bind10_test.py.in | 10 +-
src/bin/bind10/tests/sockcreator_test.py.in | 327 ++++++++++++++++++++
src/bin/bindctl/bindcmd.py | 21 +-
src/bin/bindctl/bindctl_main.py.in | 19 +-
src/bin/dhcp6/tests/Makefile.am | 4 +-
src/bin/dhcp6/tests/dhcp6_test.py | 2 +-
src/bin/resolver/resolver.cc | 3 +-
src/bin/resolver/tests/resolver_config_unittest.cc | 3 +-
src/bin/sockcreator/README | 2 +-
src/bin/xfrout/tests/xfrout_test.py.in | 51 +++-
src/bin/xfrout/xfrout.py.in | 3 +-
src/lib/acl/Makefile.am | 2 +-
src/lib/acl/dns.cc | 25 ++-
src/lib/acl/dns.h | 24 ++-
src/lib/acl/dnsname_check.h | 83 +++++
src/lib/acl/tests/Makefile.am | 2 +
src/lib/acl/tests/dns_test.cc | 86 +++++-
src/lib/acl/tests/dnsname_check_unittest.cc | 59 ++++
src/lib/cc/data.cc | 2 +
src/lib/config/module_spec.cc | 26 ++-
src/lib/config/tests/module_spec_unittests.cc | 9 +
src/lib/config/tests/testdata/Makefile.am | 4 +
src/lib/config/tests/testdata/data32_1.data | 3 +
src/lib/config/tests/testdata/data32_2.data | 3 +
src/lib/config/tests/testdata/data32_3.data | 3 +
src/lib/config/tests/testdata/spec32.spec | 19 ++
src/lib/datasrc/rbtree.h | 6 +-
src/lib/dns/rrtype-placeholder.h | 5 +
src/lib/python/isc/acl/Makefile.am | 24 +-
src/lib/python/isc/acl/{acl.py => _dns.py} | 2 +-
src/lib/python/isc/acl/dns.cc | 4 +-
src/lib/python/isc/acl/dns.py | 76 ++++-
src/lib/python/isc/acl/dns_requestacl_python.cc | 4 +-
src/lib/python/isc/acl/dns_requestcontext_inc.cc | 19 +-
.../python/isc/acl/dns_requestcontext_python.cc | 129 ++++++--
src/lib/python/isc/acl/dns_requestloader_python.cc | 4 +-
src/lib/python/isc/acl/tests/Makefile.am | 2 +-
src/lib/python/isc/acl/tests/dns_test.py | 87 +++++-
src/lib/python/isc/cc/data.py | 18 +-
src/lib/python/isc/config/ccsession.py | 152 +++++++---
src/lib/python/isc/config/config_data.py | 129 +++++++-
src/lib/python/isc/config/module_spec.py | 18 +-
src/lib/python/isc/config/tests/ccsession_test.py | 35 ++-
.../python/isc/config/tests/config_data_test.py | 55 ++++-
.../python/isc/config/tests/module_spec_test.py | 3 +
56 files changed, 1727 insertions(+), 210 deletions(-)
copy src/bin/{stats/tests/http => bind10}/__init__.py (100%)
rename src/bin/bind10/{bind10.py.in => bind10_src.py.in} (96%)
create mode 100644 src/bin/bind10/sockcreator.py
create mode 100644 src/bin/bind10/tests/sockcreator_test.py.in
create mode 100644 src/lib/acl/dnsname_check.h
create mode 100644 src/lib/acl/tests/dnsname_check_unittest.cc
create mode 100644 src/lib/config/tests/testdata/data32_1.data
create mode 100644 src/lib/config/tests/testdata/data32_2.data
create mode 100644 src/lib/config/tests/testdata/data32_3.data
create mode 100644 src/lib/config/tests/testdata/spec32.spec
copy src/lib/python/isc/acl/{acl.py => _dns.py} (98%)
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 8f86551..41c9faa 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+275. [func] jinmei
+ Added support for TSIG key matching in ACLs. The xfrout ACL can
+ now refer to TSIG key names using the "key" attribute. For
+ example, the following specifies an ACL that allows zone transfer
+ if and only if the request is signed with a TSIG of a key name
+ "key.example":
+ > config set Xfrout/query_acl[0] {"action": "ACCEPT", \
+ "key": "key.example"}
+ (Trac #1104, git 9b2e89cabb6191db86f88ee717f7abc4171fa979)
+
274. [bug] naokikambe
add unittests for functions xml_handler, xsd_handler and xsl_handler
respectively to make sure their behaviors are correct, regardless of
diff --git a/configure.ac b/configure.ac
index 48a79d2..0ede949 100644
--- a/configure.ac
+++ b/configure.ac
@@ -270,6 +270,8 @@ B10_CXXFLAGS="-Wall -Wextra -Wwrite-strings -Woverloaded-virtual -Wno-sign-compa
case "$host" in
*-solaris*)
MULTITHREADING_FLAG=-pthreads
+ # In Solaris, IN6ADDR_ANY_INIT and IN6ADDR_LOOPBACK_INIT need -Wno-missing-braces
+ B10_CXXFLAGS="$B10_CXXFLAGS -Wno-missing-braces"
;;
*)
MULTITHREADING_FLAG=-pthread
@@ -902,9 +904,10 @@ AC_OUTPUT([doc/version.ent
src/bin/zonemgr/run_b10-zonemgr.sh
src/bin/stats/stats.py
src/bin/stats/stats_httpd.py
- src/bin/bind10/bind10.py
+ src/bin/bind10/bind10_src.py
src/bin/bind10/run_bind10.sh
src/bin/bind10/tests/bind10_test.py
+ src/bin/bind10/tests/sockcreator_test.py
src/bin/bindctl/run_bindctl.sh
src/bin/bindctl/bindctl_main.py
src/bin/bindctl/tests/bindctl_test
diff --git a/ext/asio/asio/impl/error_code.ipp b/ext/asio/asio/impl/error_code.ipp
index ed37a17..218c09b 100644
--- a/ext/asio/asio/impl/error_code.ipp
+++ b/ext/asio/asio/impl/error_code.ipp
@@ -11,6 +11,9 @@
#ifndef ASIO_IMPL_ERROR_CODE_IPP
#define ASIO_IMPL_ERROR_CODE_IPP
+// strerror() needs <cstring>
+#include <cstring>
+
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index 0944005..940d57b 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -139,10 +139,10 @@ public:
shared_ptr<InMemoryZoneFinder> zone_finder(
new InMemoryZoneFinder(old_zone_finder->getClass(),
old_zone_finder->getOrigin()));
- newzone->load(old_zone_finder->getFileName());
- old_zone_finder->swap(*newzone);
+ zone_finder->load(old_zone_finder->getFileName());
+ old_zone_finder->swap(*zone_finder);
LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
- .arg(newzone->getOrigin()).arg(newzone->getClass());
+ .arg(zone_finder->getOrigin()).arg(zone_finder->getClass());
}
private:
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 126c429..1a5ce64 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,7 +1,11 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10.pyc bind10_messages.py bind10_messages.pyc
+CLEANFILES = bind10 bind10_src.pyc bind10_messages.py bind10_messages.pyc \
+ sockcreator.pyc
+
+python_PYTHON = __init__.py sockcreator.py
+pythondir = $(pyexecdir)/bind10
pkglibexecdir = $(libexecdir)/@PACKAGE@
pyexec_DATA = bind10_messages.py
@@ -24,9 +28,9 @@ bind10_messages.py: bind10_messages.mes
$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/bind10/bind10_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10.py
+bind10: bind10_src.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10.py >$@
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
chmod a+x $@
pytest:
diff --git a/src/bin/bind10/__init__.py b/src/bin/bind10/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/bin/bind10/bind10.py.in b/src/bin/bind10/bind10.py.in
deleted file mode 100755
index a624383..0000000
--- a/src/bin/bind10/bind10.py.in
+++ /dev/null
@@ -1,1037 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
- PREFIX = "@prefix@"
- DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-
-import isc.cc
-import isc.util.process
-import isc.net.parse
-import isc.log
-from bind10_messages import *
-
-isc.log.init("b10-boss")
-logger = isc.log.Logger("boss")
-
-# Pending system-wide debug level definitions, the ones we
-# use here are hardcoded for now
-DBG_PROCESS = 10
-DBG_COMMANDS = 30
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for bind10.boottime of stats module
-_BASETIME = time.gmtime()
-
-class RestartSchedule:
- """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
- * If a process was been running for >=10 seconds, we restart it
- right away.
- * If a process was running for <10 seconds, we wait until 10 seconds
- after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
- def __init__(self, restart_frequency=10.0):
- self.restart_frequency = restart_frequency
- self.run_start_time = None
- self.run_stop_time = None
- self.restart_time = None
-
- def set_run_start_time(self, when=None):
- if when is None:
- when = time.time()
- self.run_start_time = when
- sigma = self.restart_frequency * 0.05
- self.restart_time = when + random.normalvariate(self.restart_frequency,
- sigma)
-
- def set_run_stop_time(self, when=None):
- """We don't actually do anything with stop time now, but it
- might be useful for future algorithms."""
- if when is None:
- when = time.time()
- self.run_stop_time = when
-
- def get_restart_time(self, when=None):
- if when is None:
- when = time.time()
- return max(when, self.restart_time)
-
-class ProcessInfoError(Exception): pass
-
-class ProcessInfo:
- """Information about a process"""
-
- dev_null = open(os.devnull, "w")
-
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False, uid=None, username=None):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.restart_schedule = RestartSchedule()
- self.uid = uid
- self.username = username
- self.process = None
- self.pid = None
-
- def _preexec_work(self):
- """Function used before running a program that needs to run as a
- different user."""
- # First, put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everthing down by
- # other means).
- os.setpgrp()
- # Second, set the user ID if one has been specified
- if self.uid is not None:
- try:
- posix.setuid(self.uid)
- except OSError as e:
- if e.errno == errno.EPERM:
- # if we failed to change user due to permission report that
- raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
- else:
- # otherwise simply re-raise whatever error we found
- raise
-
- def _spawn(self):
- if self.dev_null_stdout:
- spawn_stdout = self.dev_null
- else:
- spawn_stdout = None
- if self.dev_null_stderr:
- spawn_stderr = self.dev_null
- else:
- spawn_stderr = None
- # Environment variables for the child process will be a copy of those
- # of the boss process with any additional specific variables given
- # on construction (self.env).
- spawn_env = os.environ
- spawn_env.update(self.env)
- if 'B10_FROM_SOURCE' not in os.environ:
- spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
- self.process = subprocess.Popen(self.args,
- stdin=subprocess.PIPE,
- stdout=spawn_stdout,
- stderr=spawn_stderr,
- close_fds=True,
- env=spawn_env,
- preexec_fn=self._preexec_work)
- self.pid = self.process.pid
- self.restart_schedule.set_run_start_time()
-
- # spawn() and respawn() are the same for now, but in the future they
- # may have different functionality
- def spawn(self):
- self._spawn()
-
- def respawn(self):
- self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class BoB:
- """Boss of BIND class."""
-
- def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, brittle=False):
- """
- Initialize the Boss of BIND. This is a singleton (only one can run).
-
- The msgq_socket_file specifies the UNIX domain socket file that the
- msgq process listens on. If verbose is True, then the boss reports
- what it is doing.
-
- Data path and config filename are passed trough to config manager
- (if provided) and specify the config file to be used.
-
- The cmdctl_port is passed to cmdctl and specify on which port it
- should listen.
- """
- self.cc_session = None
- self.ccs = None
- self.cfg_start_auth = True
- self.cfg_start_resolver = False
- self.cfg_start_dhcp6 = False
- self.cfg_start_dhcp4 = False
- self.started_auth_family = False
- self.started_resolver_family = False
- self.curproc = None
- self.dead_processes = {}
- self.msgq_socket_file = msgq_socket_file
- self.nocache = nocache
- self.processes = {}
- self.expected_shutdowns = {}
- self.runnable = False
- self.uid = setuid
- self.username = username
- self.verbose = verbose
- self.data_path = data_path
- self.config_filename = config_filename
- self.cmdctl_port = cmdctl_port
- self.brittle = brittle
-
- def config_handler(self, new_config):
- # If this is initial update, don't do anything now, leave it to startup
- if not self.runnable:
- return
- # Now we declare few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- logger.info(BIND10_START_AS_NON_ROOT, name)
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.start_resolver(self.c_channel_env)
- self.started_resolver_family = True
- def resolver_off():
- self.stop_resolver()
- self.started_resolver_family = False
- def auth_on():
- self.start_auth(self.c_channel_env)
- self.start_xfrout(self.c_channel_env)
- self.start_xfrin(self.c_channel_env)
- self.start_zonemgr(self.c_channel_env)
- self.started_auth_family = True
- def auth_off():
- self.stop_zonemgr()
- self.stop_xfrin()
- self.stop_xfrout()
- self.stop_auth()
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
- logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
- new_config)
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
-
- def get_processes(self):
- pids = list(self.processes.keys())
- pids.sort()
- process_list = [ ]
- for pid in pids:
- process_list.append([pid, self.processes[pid].name])
- return process_list
-
- def command_handler(self, command, args):
- logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
- answer = isc.config.ccsession.create_answer(1, "command not implemented")
- if type(command) != str:
- answer = isc.config.ccsession.create_answer(1, "bad command")
- else:
- if command == "shutdown":
- self.runnable = False
- answer = isc.config.ccsession.create_answer(0)
- elif command == "sendstats":
- # send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }})
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- self.cc_session.group_recvmsg(True, seq)
- answer = isc.config.ccsession.create_answer(0)
- elif command == "ping":
- answer = isc.config.ccsession.create_answer(0, "pong")
- elif command == "show_processes":
- answer = isc.config.ccsession. \
- create_answer(0, self.get_processes())
- else:
- answer = isc.config.ccsession.create_answer(1,
- "Unknown command")
- return answer
-
- def kill_started_processes(self):
- """
- Called as part of the exception handling when a process fails to
- start, this runs through the list of started processes, killing
- each one. It then clears that list.
- """
- logger.info(BIND10_KILLING_ALL_PROCESSES)
-
- for pid in self.processes:
- logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
-
- def read_bind10_config(self):
- """
- Reads the parameters associated with the BoB module itself.
-
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
- """
- logger.info(BIND10_READING_BOSS_CONFIGURATION)
-
- config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
- logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
-
- def log_starting(self, process, port = None, address = None):
- """
- A convenience function to output a "Starting xxx" message if the
- logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
- Putting this into a separate method ensures
- that the output form is consistent across all processes.
-
- The process name (passed as the first argument) is put into
- self.curproc, and is used to indicate which process failed to
- start if there is an error (and is used in the "Started" message
- on success). The optional port and address information are
- appended to the message (if present).
- """
- self.curproc = process
- if port is None and address is None:
- logger.info(BIND10_STARTING_PROCESS, self.curproc)
- elif address is None:
- logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
- port)
- else:
- logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
- self.curproc, address, port)
-
- def log_started(self, pid = None):
- """
- A convenience function to output a 'Started xxxx (PID yyyy)'
- message. As with starting_message(), this ensures a consistent
- format.
- """
- if pid is None:
- logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
- else:
- logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
-
- # The next few methods start the individual processes of BIND-10. They
- # are called via start_all_processes(). If any fail, an exception is
- # raised which is caught by the caller of start_all_processes(); this kills
- # processes started up to that point before terminating the program.
-
- def start_msgq(self, c_channel_env):
- """
- Start the message queue and connect to the command channel.
- """
- self.log_starting("b10-msgq")
- c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
- True, not self.verbose, uid=self.uid,
- username=self.username)
- c_channel.spawn()
- self.processes[c_channel.pid] = c_channel
- self.log_started(c_channel.pid)
-
- # Now connect to the c-channel
- cc_connect_start = time.time()
- while self.cc_session is None:
- # if we have been trying for "a while" give up
- if (time.time() - cc_connect_start) > 5:
- raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- except isc.cc.session.SessionError:
- time.sleep(0.1)
-
- def start_cfgmgr(self, c_channel_env):
- """
- Starts the configuration manager process
- """
- self.log_starting("b10-cfgmgr")
- args = ["b10-cfgmgr"]
- if self.data_path is not None:
- args.append("--data-path=" + self.data_path)
- if self.config_filename is not None:
- args.append("--config-filename=" + self.config_filename)
- bind_cfgd = ProcessInfo("b10-cfgmgr", args,
- c_channel_env, uid=self.uid,
- username=self.username)
- bind_cfgd.spawn()
- self.processes[bind_cfgd.pid] = bind_cfgd
- self.log_started(bind_cfgd.pid)
-
- # sleep until b10-cfgmgr is fully up and running, this is a good place
- # to have a (short) timeout on synchronized groupsend/receive
- # TODO: replace the sleep by a listen for ConfigManager started
- # message
- time.sleep(1)
-
- def start_ccsession(self, c_channel_env):
- """
- Start the CC Session
-
- The argument c_channel_env is unused but is supplied to keep the
- argument list the same for all start_xxx methods.
- """
- self.log_starting("ccsession")
- self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler)
- self.ccs.start()
- self.log_started()
-
- # A couple of utility methods for starting processes...
-
- def start_process(self, name, args, c_channel_env, port=None, address=None):
- """
- Given a set of command arguments, start the process and output
- appropriate log messages. If the start is successful, the process
- is added to the list of started processes.
-
- The port and address arguments are for log messages only.
- """
- self.log_starting(name, port, address)
- newproc = ProcessInfo(name, args, c_channel_env)
- newproc.spawn()
- self.processes[newproc.pid] = newproc
- self.log_started(newproc.pid)
-
- def start_simple(self, name, c_channel_env, port=None, address=None):
- """
- Most of the BIND-10 processes are started with the command:
-
- <process-name> [-v]
-
- ... where -v is appended if verbose is enabled. This method
- generates the arguments from the name and starts the process.
-
- The port and address arguments are for log messages only.
- """
- # Set up the command arguments.
- args = [name]
- if self.verbose:
- args += ['-v']
-
- # ... and start the process
- self.start_process(name, args, c_channel_env, port, address)
-
- # The next few methods start up the rest of the BIND-10 processes.
- # Although many of these methods are little more than a call to
- # start_simple, they are retained (a) for testing reasons and (b) as a place
- # where modifications can be made if the process start-up sequence changes
- # for a given process.
-
- def start_auth(self, c_channel_env):
- """
- Start the Authoritative server
- """
- authargs = ['b10-auth']
- if self.nocache:
- authargs += ['-n']
- if self.uid:
- authargs += ['-u', str(self.uid)]
- if self.verbose:
- authargs += ['-v']
-
- # ... and start
- self.start_process("b10-auth", authargs, c_channel_env)
-
- def start_resolver(self, c_channel_env):
- """
- Start the Resolver. At present, all these arguments and switches
- are pure speculation. As with the auth daemon, they should be
- read from the configuration database.
- """
- self.curproc = "b10-resolver"
- # XXX: this must be read from the configuration manager in the future
- resargs = ['b10-resolver']
- if self.uid:
- resargs += ['-u', str(self.uid)]
- if self.verbose:
- resargs += ['-v']
-
- # ... and start
- self.start_process("b10-resolver", resargs, c_channel_env)
-
- def start_xfrout(self, c_channel_env):
- self.start_simple("b10-xfrout", c_channel_env)
-
- def start_xfrin(self, c_channel_env):
- self.start_simple("b10-xfrin", c_channel_env)
-
- def start_zonemgr(self, c_channel_env):
- self.start_simple("b10-zonemgr", c_channel_env)
-
- def start_stats(self, c_channel_env):
- self.start_simple("b10-stats", c_channel_env)
-
- def start_stats_httpd(self, c_channel_env):
- self.start_simple("b10-stats-httpd", c_channel_env)
-
- def start_dhcp6(self, c_channel_env):
- self.start_simple("b10-dhcp6", c_channel_env)
-
- def start_cmdctl(self, c_channel_env):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
-
- def start_all_processes(self):
- """
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
- """
- c_channel_env = self.c_channel_env
- self.start_msgq(c_channel_env)
- self.start_cfgmgr(c_channel_env)
- self.start_ccsession(c_channel_env)
-
- # Extract the parameters associated with Bob. This can only be
- # done after the CC Session is started.
- self.read_bind10_config()
-
- # Continue starting the processes. The authoritative server (if
- # selected):
- if self.cfg_start_auth:
- self.start_auth(c_channel_env)
-
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- self.start_resolver(c_channel_env)
- self.started_resolver_family = True
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- self.start_xfrout(c_channel_env)
- self.start_xfrin(c_channel_env)
- self.start_zonemgr(c_channel_env)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- self.start_stats(c_channel_env)
- self.start_stats_httpd(c_channel_env)
- self.start_cmdctl(c_channel_env)
-
- if self.cfg_start_dhcp6:
- self.start_dhcp6(c_channel_env)
-
- def startup(self):
- """
- Start the BoB instance.
-
- Returns None if successful, otherwise an string describing the
- problem.
- """
- # Try to connect to the c-channel daemon, to see if it is already
- # running
- c_channel_env = {}
- if self.msgq_socket_file is not None:
- c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
- logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
- return "b10-msgq already running, or socket file not cleaned , cannot start"
- except isc.cc.session.SessionError:
- # this is the case we want, where the msgq is not running
- pass
-
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
- try:
- self.c_channel_env = c_channel_env
- self.start_all_processes()
- except Exception as e:
- self.kill_started_processes()
- return "Unable to start " + self.curproc + ": " + str(e)
-
- # Started successfully
- self.runnable = True
- return None
-
- def stop_all_processes(self):
- """Stop all processes."""
- cmd = { "command": ['shutdown']}
-
- self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
- self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
- self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
- self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
- self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
- self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
- self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
- self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
- self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
-
- def stop_process(self, process, recipient):
- """
- Stop the given process, friendly-like. The process is the name it has
- (in logs, etc), the recipient is the address on msgq.
- """
- logger.info(BIND10_STOP_PROCESS, process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
- self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
- recipient)
-
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
-
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
-
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
-
- def shutdown(self):
- """Stop the BoB instance."""
- logger.info(BIND10_SHUTDOWN)
- # first try using the BIND 10 request to stop
- try:
- self.stop_all_processes()
- except:
- pass
- # XXX: some delay probably useful... how much is uncertain
- # I have changed the delay from 0.5 to 1, but sometime it's
- # still not enough.
- time.sleep(1)
- self.reap_children()
- # next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGTERM, proc_info.name,
- proc_info.pid)
- try:
- proc_info.process.terminate()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- # finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGKILL, proc_info.name,
- proc_info.pid)
- try:
- proc_info.process.kill()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- logger.info(BIND10_SHUTDOWN_COMPLETE)
-
- def _get_process_exit_status(self):
- return os.waitpid(-1, os.WNOHANG)
-
- def reap_children(self):
- """Check to see if any of our child processes have exited,
- and note this for later handling.
- """
- while True:
- try:
- (pid, exit_status) = self._get_process_exit_status()
- except OSError as o:
- if o.errno == errno.ECHILD: break
- # XXX: should be impossible to get any other error here
- raise
- if pid == 0: break
- if pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
- proc_info.name, proc_info.pid)
- else:
- logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
- proc_info.name, proc_info.pid,
- exit_status)
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
- else:
- logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
-
- def restart_processes(self):
- """
- Restart any dead processes:
-
- * Returns the time when the next process is ready to be restarted.
- * If the server is shutting down, returns 0.
- * If there are no processes, returns None.
-
- The values returned can be safely passed into select() as the
- timeout value.
- """
- next_restart = None
- # if we're shutting down, then don't restart
- if not self.runnable:
- return 0
- # otherwise look through each dead process and try to restart
- still_dead = {}
- now = time.time()
- for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
- restart_time = proc_info.restart_schedule.get_restart_time(now)
- if restart_time > now:
- if (next_restart is None) or (next_restart > restart_time):
- next_restart = restart_time
- still_dead[proc_info.pid] = proc_info
- else:
- logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
- try:
- proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
- logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
- except:
- still_dead[proc_info.pid] = proc_info
- # remember any processes that refuse to be resurrected
- self.dead_processes = still_dead
- # return the time when the next process is ready to be restarted
- return next_restart
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
- """A child process has died (SIGCHLD received)."""
- # don't do anything...
- # the Python signal handler has been set up to write
- # down a pipe, waking up our select() bit
- pass
-
-def get_signame(signal_number):
- """Return the symbolic name for a signal."""
- for sig in dir(signal):
- if sig.startswith("SIG") and sig[3].isalnum():
- if getattr(signal, sig) == signal_number:
- return sig
- return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
- """We need to exit (SIGINT or SIGTERM received)."""
- global options
- global boss_of_bind
- logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
- """Function that renames the process if it is requested by a option."""
- isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
- """
- Function for parsing command line arguments. Returns the
- options object from OptionParser.
- """
- parser = Parser(version=VERSION)
- parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
- type="string", default=None,
- help="UNIX domain socket file the b10-msgq daemon will use")
- parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
- default=False, help="disable hot-spot cache in authoritative DNS server")
- parser.add_option("-u", "--user", dest="user", type="string", default=None,
- help="Change user after startup (must run as root)")
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
- parser.add_option("--pretty-name", type="string", action="callback",
- callback=process_rename,
- help="Set the process name (displayed in ps, top, ...)")
- parser.add_option("-c", "--config-file", action="store",
- dest="config_file", default=None,
- help="Configuration database filename")
- parser.add_option("-p", "--data-path", dest="data_path",
- help="Directory to search for configuration files",
- default=None)
- parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
- default=None, help="Port of command control")
- parser.add_option("--pid-file", dest="pid_file", type="string",
- default=None,
- help="file to dump the PID of the BIND 10 process")
- parser.add_option("--brittle", dest="brittle", action="store_true",
- help="debugging flag: exit if any component dies")
-
- (options, args) = parser.parse_args(args)
-
- if options.cmdctl_port is not None:
- try:
- isc.net.parse.port_parse(options.cmdctl_port)
- except ValueError as e:
- parser.error(e)
-
- if args:
- parser.print_help()
- sys.exit(1)
-
- return options
-
-def dump_pid(pid_file):
- """
- Dump the PID of the current process to the specified file. If the given
- file is None this function does nothing. If the file already exists,
- the existing content will be removed. If a system error happens in
- creating or writing to the file, the corresponding exception will be
- propagated to the caller.
- """
- if pid_file is None:
- return
- f = open(pid_file, "w")
- f.write('%d\n' % os.getpid())
- f.close()
-
-def unlink_pid_file(pid_file):
- """
- Remove the given file, which is basically expected to be the PID file
- created by dump_pid(). The specified may or may not exist; if it
- doesn't this function does nothing. Other system level errors in removing
- the file will be propagated as the corresponding exception.
- """
- if pid_file is None:
- return
- try:
- os.unlink(pid_file)
- except OSError as error:
- if error.errno is not errno.ENOENT:
- raise
-
-
-def main():
- global options
- global boss_of_bind
- # Enforce line buffering on stdout, even when not a TTY
- sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
- options = parse_args()
-
- # Check user ID.
- setuid = None
- username = None
- if options.user:
- # Try getting information about the user, assuming UID passed.
- try:
- pw_ent = pwd.getpwuid(int(options.user))
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except ValueError:
- pass
- except KeyError:
- pass
-
- # Next try getting information about the user, assuming user name
- # passed.
- # If the information is both a valid user name and user number, we
- # prefer the name because we try it second. A minor point, hopefully.
- try:
- pw_ent = pwd.getpwnam(options.user)
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except KeyError:
- pass
-
- if setuid is None:
- logger.fatal(BIND10_INVALID_USER, options.user)
- sys.exit(1)
-
- # Announce startup.
- logger.info(BIND10_STARTING, VERSION)
-
- # Create wakeup pipe for signal handlers
- wakeup_pipe = os.pipe()
- signal.set_wakeup_fd(wakeup_pipe[1])
-
- # Set signal handlers for catching child termination, as well
- # as our own demise.
- signal.signal(signal.SIGCHLD, reaper)
- signal.siginterrupt(signal.SIGCHLD, False)
- signal.signal(signal.SIGINT, fatal_signal)
- signal.signal(signal.SIGTERM, fatal_signal)
-
- # Block SIGPIPE, as we don't want it to end this process
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port, options.brittle)
- startup_result = boss_of_bind.startup()
- if startup_result:
- logger.fatal(BIND10_STARTUP_ERROR, startup_result)
- sys.exit(1)
- logger.info(BIND10_STARTUP_COMPLETE)
- dump_pid(options.pid_file)
-
- # In our main loop, we check for dead processes or messages
- # on the c-channel.
- wakeup_fd = wakeup_pipe[0]
- ccs_fd = boss_of_bind.ccs.get_socket().fileno()
- while boss_of_bind.runnable:
- # clean up any processes that exited
- boss_of_bind.reap_children()
- next_restart = boss_of_bind.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- logger.fatal(BIND10_SELECT_ERROR, err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- boss_of_bind.ccs.check_command()
- except isc.cc.session.ProtocolError:
- logger.fatal(BIND10_MSGQ_DISAPPEARED)
- self.runnable = False
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- unlink_pid_file(options.pid_file)
- sys.exit(0)
-
-if __name__ == "__main__":
- main()
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 3f5f637..e10bc7c 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -32,15 +32,15 @@ started according to the configuration.
The boss process was started with the -u option, to drop root privileges
and continue running as the specified user, but the user is unknown.
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
% BIND10_KILL_PROCESS killing process %1
The boss module is sending a kill signal to process with the given name,
as part of the process of killing all started processes during a failed
startup, as described for BIND10_KILLING_ALL_PROCESSES
-% BIND10_KILLING_ALL_PROCESSES killing all started processes
-The boss module was not able to start every process it needed to start
-during startup, and will now kill the processes that did get started.
-
% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
There already appears to be a message bus daemon running. Either an
old process was not shut down correctly, and needs to be killed, or
@@ -113,12 +113,49 @@ it shall send SIGKILL signals to the processes still alive.
All child processes have been stopped, and the boss process will now
stop itself.
-% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
-The given module is being started or restarted without root privileges.
-If the module needs these privileges, it may have problems starting.
-Note that this issue should be resolved by the pending 'socket-creator'
-process; once that has been implemented, modules should not need root
-privileges anymore. See tickets #800 and #801 for more information.
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+
+% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The boss module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not usually happen.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The boss module sends a request to terminate to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The boss forwards a request for a socket to the socket creator.
% BIND10_STARTED_PROCESS started %1
The given process has successfully been started.
@@ -147,6 +184,13 @@ All modules have been successfully started, and BIND 10 is now running.
There was a fatal error when BIND10 was trying to start. The error is
shown, and BIND10 will now shut down.
+% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
% BIND10_STOP_PROCESS asking %1 to shut down
The boss module is sending a shutdown command to the given module over
the message channel.
@@ -154,4 +198,3 @@ the message channel.
% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
An unknown child process has exited. The PID is printed, but no further
action will be taken by the boss process.
-
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
new file mode 100755
index 0000000..bbb17a2
--- /dev/null
+++ b/src/bin/bind10/bind10_src.py.in
@@ -0,0 +1,1069 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the Boss of Bind (BoB, or bob) program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the BoB class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
+else:
+ PREFIX = "@prefix@"
+ DATAROOTDIR = "@datarootdir@"
+ SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+from bind10_messages import *
+import bind10.sockcreator
+
+isc.log.init("b10-boss")
+logger = isc.log.Logger("boss")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = 10
+DBG_COMMANDS = 30
+
+# Assign this process some longer name
+isc.util.process.rename(sys.argv[0])
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for bind10.boottime of stats module
+_BASETIME = time.gmtime()
+
+class RestartSchedule:
+ """
+Keeps state when restarting something (in this case, a process).
+
+When a process dies unexpectedly, we need to restart it. However, if
+it fails to restart for some reason, then we should not simply keep
+restarting it at high speed.
+
+A more sophisticated algorithm can be developed, but for now we choose
+a simple set of rules:
+
+ * If a process has been running for >=10 seconds, we restart it
+ right away.
+ * If a process was running for <10 seconds, we wait until 10 seconds
+ after it was started.
+
+To avoid programs getting into lockstep, we use a normal distribution
+to avoid being restarted at exactly 10 seconds."""
+
+ def __init__(self, restart_frequency=10.0):
+ self.restart_frequency = restart_frequency
+ self.run_start_time = None
+ self.run_stop_time = None
+ self.restart_time = None
+
+ def set_run_start_time(self, when=None):
+ if when is None:
+ when = time.time()
+ self.run_start_time = when
+ sigma = self.restart_frequency * 0.05
+ self.restart_time = when + random.normalvariate(self.restart_frequency,
+ sigma)
+
+ def set_run_stop_time(self, when=None):
+ """We don't actually do anything with stop time now, but it
+ might be useful for future algorithms."""
+ if when is None:
+ when = time.time()
+ self.run_stop_time = when
+
+ def get_restart_time(self, when=None):
+ if when is None:
+ when = time.time()
+ return max(when, self.restart_time)
+
+class ProcessInfoError(Exception): pass
+
+class ProcessInfo:
+ """Information about a process"""
+
+ dev_null = open(os.devnull, "w")
+
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False, uid=None, username=None):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.restart_schedule = RestartSchedule()
+ self.uid = uid
+ self.username = username
+ self.process = None
+ self.pid = None
+
+ def _preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # First, put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (the boss will shut everything down by
+ # other means).
+ os.setpgrp()
+ # Second, set the user ID if one has been specified
+ if self.uid is not None:
+ try:
+ posix.setuid(self.uid)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # if we failed to change user due to permission report that
+ raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
+ else:
+ # otherwise simply re-raise whatever error we found
+ raise
+
+ def _spawn(self):
+ if self.dev_null_stdout:
+ spawn_stdout = self.dev_null
+ else:
+ spawn_stdout = None
+ if self.dev_null_stderr:
+ spawn_stderr = self.dev_null
+ else:
+ spawn_stderr = None
+ # Environment variables for the child process will be a copy of those
+ # of the boss process with any additional specific variables given
+ # on construction (self.env).
+ spawn_env = os.environ
+ spawn_env.update(self.env)
+ if 'B10_FROM_SOURCE' not in os.environ:
+ spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
+ self.process = subprocess.Popen(self.args,
+ stdin=subprocess.PIPE,
+ stdout=spawn_stdout,
+ stderr=spawn_stderr,
+ close_fds=True,
+ env=spawn_env,
+ preexec_fn=self._preexec_work)
+ self.pid = self.process.pid
+ self.restart_schedule.set_run_start_time()
+
+ # spawn() and respawn() are the same for now, but in the future they
+ # may have different functionality
+ def spawn(self):
+ self._spawn()
+
+ def respawn(self):
+ self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class BoB:
+ """Boss of BIND class."""
+
+ def __init__(self, msgq_socket_file=None, data_path=None,
+ config_filename=None, nocache=False, verbose=False, setuid=None,
+ username=None, cmdctl_port=None, brittle=False):
+ """
+ Initialize the Boss of BIND. This is a singleton (only one can run).
+
+ The msgq_socket_file specifies the UNIX domain socket file that the
+ msgq process listens on. If verbose is True, then the boss reports
+ what it is doing.
+
+ Data path and config filename are passed through to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specifies on which port it
+ should listen.
+ """
+ self.cc_session = None
+ self.ccs = None
+ self.cfg_start_auth = True
+ self.cfg_start_resolver = False
+ self.cfg_start_dhcp6 = False
+ self.cfg_start_dhcp4 = False
+ self.started_auth_family = False
+ self.started_resolver_family = False
+ self.curproc = None
+ self.dead_processes = {}
+ self.msgq_socket_file = msgq_socket_file
+ self.nocache = nocache
+ self.processes = {}
+ self.expected_shutdowns = {}
+ self.runnable = False
+ self.uid = setuid
+ self.username = username
+ self.verbose = verbose
+ self.data_path = data_path
+ self.config_filename = config_filename
+ self.cmdctl_port = cmdctl_port
+ self.brittle = brittle
+ self.sockcreator = None
+
+ def config_handler(self, new_config):
+ # If this is initial update, don't do anything now, leave it to startup
+ if not self.runnable:
+ return
+ # Now we declare few functions used only internally here. Besides the
+ # benefit of not polluting the name space, they are closures, so we
+ # don't need to pass some variables
+ def start_stop(name, started, start, stop):
+ if not'start_' + name in new_config:
+ return
+ if new_config['start_' + name]:
+ if not started:
+ if self.uid is not None:
+ logger.info(BIND10_START_AS_NON_ROOT, name)
+ start()
+ else:
+ stop()
+ # These four functions are passed to start_stop (smells like functional
+ # programming little bit)
+ def resolver_on():
+ self.start_resolver(self.c_channel_env)
+ self.started_resolver_family = True
+ def resolver_off():
+ self.stop_resolver()
+ self.started_resolver_family = False
+ def auth_on():
+ self.start_auth(self.c_channel_env)
+ self.start_xfrout(self.c_channel_env)
+ self.start_xfrin(self.c_channel_env)
+ self.start_zonemgr(self.c_channel_env)
+ self.started_auth_family = True
+ def auth_off():
+ self.stop_zonemgr()
+ self.stop_xfrin()
+ self.stop_xfrout()
+ self.stop_auth()
+ self.started_auth_family = False
+
+ # The real code of the config handler function follows here
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+ new_config)
+ start_stop('resolver', self.started_resolver_family, resolver_on,
+ resolver_off)
+ start_stop('auth', self.started_auth_family, auth_on, auth_off)
+
+ answer = isc.config.ccsession.create_answer(0)
+ return answer
+
+ def get_processes(self):
+ pids = list(self.processes.keys())
+ pids.sort()
+ process_list = [ ]
+ for pid in pids:
+ process_list.append([pid, self.processes[pid].name])
+ return process_list
+
+ def command_handler(self, command, args):
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+ answer = isc.config.ccsession.create_answer(1, "command not implemented")
+ if type(command) != str:
+ answer = isc.config.ccsession.create_answer(1, "bad command")
+ else:
+ if command == "shutdown":
+ self.runnable = False
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "sendstats":
+ # send statistics data to the stats daemon immediately
+ cmd = isc.config.ccsession.create_command(
+ 'set', { "stats_data": {
+ 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }})
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ self.cc_session.group_recvmsg(True, seq)
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "ping":
+ answer = isc.config.ccsession.create_answer(0, "pong")
+ elif command == "show_processes":
+ answer = isc.config.ccsession. \
+ create_answer(0, self.get_processes())
+ else:
+ answer = isc.config.ccsession.create_answer(1,
+ "Unknown command")
+ return answer
+
+ def start_creator(self):
+ self.curproc = 'b10-sockcreator'
+ self.sockcreator = bind10.sockcreator.Creator("@@LIBEXECDIR@@:" +
+ os.environ['PATH'])
+
+ def stop_creator(self, kill=False):
+ if self.sockcreator is None:
+ return
+ if kill:
+ self.sockcreator.kill()
+ else:
+ self.sockcreator.terminate()
+ self.sockcreator = None
+
+ def kill_started_processes(self):
+ """
+ Called as part of the exception handling when a process fails to
+ start, this runs through the list of started processes, killing
+ each one. It then clears that list.
+ """
+ logger.info(BIND10_KILLING_ALL_PROCESSES)
+
+ self.stop_creator(True)
+
+ for pid in self.processes:
+ logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
+ self.processes[pid].process.kill()
+ self.processes = {}
+
+ def read_bind10_config(self):
+ """
+ Reads the parameters associated with the BoB module itself.
+
+ At present these are the components to start although arguably this
+ information should be in the configuration for the appropriate
+ module itself. (However, this would cause difficulty in the case of
+ xfrin/xfrout and zone manager as we don't need to start those if we
+ are not running the authoritative server.)
+ """
+ logger.info(BIND10_READING_BOSS_CONFIGURATION)
+
+ config_data = self.ccs.get_full_config()
+ self.cfg_start_auth = config_data.get("start_auth")
+ self.cfg_start_resolver = config_data.get("start_resolver")
+
+ logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
+ logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+
    def log_starting(self, process, port = None, address = None):
        """
        A convenience function to output a "Starting xxx" message at INFO
        level.  Putting this into a separate method ensures that the output
        form is consistent across all processes.

        The process name (passed as the first argument) is put into
        self.curproc, and is used to indicate which process failed to
        start if there is an error (and is used in the "Started" message
        on success). The optional port and address information are
        appended to the message (if present).
        """
        self.curproc = process
        if port is None and address is None:
            logger.info(BIND10_STARTING_PROCESS, self.curproc)
        elif address is None:
            logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
                        port)
        else:
            logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
                        self.curproc, address, port)
+
    def log_started(self, pid = None):
        """
        A convenience function to output a 'Started xxxx (PID yyyy)'
        message at DEBUG/DBG_PROCESS level.  As with log_starting(), this
        ensures a consistent format.  The process name is taken from
        self.curproc (set by log_starting()).
        """
        if pid is None:
            logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
        else:
            logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+ # The next few methods start the individual processes of BIND-10. They
+ # are called via start_all_processes(). If any fail, an exception is
+ # raised which is caught by the caller of start_all_processes(); this kills
+ # processes started up to that point before terminating the program.
+
    def start_msgq(self, c_channel_env):
        """
        Start the message queue and connect to the command channel.

        Raises CChannelConnectError if the c-channel cannot be reached
        within five seconds of spawning b10-msgq.
        """
        self.log_starting("b10-msgq")
        c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
                                True, not self.verbose, uid=self.uid,
                                username=self.username)
        c_channel.spawn()
        self.processes[c_channel.pid] = c_channel
        self.log_started(c_channel.pid)

        # Now connect to the c-channel
        cc_connect_start = time.time()
        while self.cc_session is None:
            # if we have been trying for "a while" give up
            if (time.time() - cc_connect_start) > 5:
                raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")

            # try to connect, and if we can't wait a short while
            try:
                self.cc_session = isc.cc.Session(self.msgq_socket_file)
            except isc.cc.session.SessionError:
                time.sleep(0.1)
+
    def start_cfgmgr(self, c_channel_env):
        """
        Starts the configuration manager process, passing along the
        optional --data-path and --config-filename settings.
        """
        self.log_starting("b10-cfgmgr")
        args = ["b10-cfgmgr"]
        if self.data_path is not None:
            args.append("--data-path=" + self.data_path)
        if self.config_filename is not None:
            args.append("--config-filename=" + self.config_filename)
        bind_cfgd = ProcessInfo("b10-cfgmgr", args,
                                c_channel_env, uid=self.uid,
                                username=self.username)
        bind_cfgd.spawn()
        self.processes[bind_cfgd.pid] = bind_cfgd
        self.log_started(bind_cfgd.pid)

        # sleep until b10-cfgmgr is fully up and running, this is a good place
        # to have a (short) timeout on synchronized groupsend/receive
        # TODO: replace the sleep by a listen for ConfigManager started
        # message
        time.sleep(1)
+
    def start_ccsession(self, c_channel_env):
        """
        Start the CC Session and store it in self.ccs.

        The argument c_channel_env is unused but is supplied to keep the
        argument list the same for all start_xxx methods.
        """
        self.log_starting("ccsession")
        self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
                                              self.config_handler,
                                              self.command_handler)
        self.ccs.start()
        self.log_started()
+
+ # A couple of utility methods for starting processes...
+
    def start_process(self, name, args, c_channel_env, port=None, address=None):
        """
        Given a set of command arguments, start the process and output
        appropriate log messages.  If the start is successful, the process
        is added to the list of started processes.

        The port and address arguments are for log messages only.

        NOTE(review): unlike start_msgq()/start_cfgmgr(), no uid/username
        is passed to ProcessInfo here — presumably privileges have already
        been dropped by this point; confirm.
        """
        self.log_starting(name, port, address)
        newproc = ProcessInfo(name, args, c_channel_env)
        newproc.spawn()
        self.processes[newproc.pid] = newproc
        self.log_started(newproc.pid)
+
    def start_simple(self, name, c_channel_env, port=None, address=None):
        """
        Most of the BIND-10 processes are started with the command:

            <process-name> [-v]

        ... where -v is appended if verbose is enabled. This method
        generates the arguments from the name and starts the process.

        The port and address arguments are for log messages only.
        """
        # Set up the command arguments.
        args = [name]
        if self.verbose:
            args += ['-v']

        # ... and start the process
        self.start_process(name, args, c_channel_env, port, address)
+
+ # The next few methods start up the rest of the BIND-10 processes.
+ # Although many of these methods are little more than a call to
+ # start_simple, they are retained (a) for testing reasons and (b) as a place
+ # where modifications can be made if the process start-up sequence changes
+ # for a given process.
+
    def start_auth(self, c_channel_env):
        """
        Start the Authoritative server, honouring the no-cache (-n),
        setuid (-u) and verbose (-v) settings.
        """
        authargs = ['b10-auth']
        if self.nocache:
            authargs += ['-n']
        if self.uid:
            authargs += ['-u', str(self.uid)]
        if self.verbose:
            authargs += ['-v']

        # ... and start
        self.start_process("b10-auth", authargs, c_channel_env)
+
    def start_resolver(self, c_channel_env):
        """
        Start the Resolver.  At present, all these arguments and switches
        are pure speculation.  As with the auth daemon, they should be
        read from the configuration database.
        """
        # Redundant: start_process() -> log_starting() sets curproc too;
        # kept for safety in case argument building below raises first.
        self.curproc = "b10-resolver"
        # XXX: this must be read from the configuration manager in the future
        resargs = ['b10-resolver']
        if self.uid:
            resargs += ['-u', str(self.uid)]
        if self.verbose:
            resargs += ['-v']

        # ... and start
        self.start_process("b10-resolver", resargs, c_channel_env)
+
    def start_xfrout(self, c_channel_env):
        """Start the zone-transfer-out daemon via start_simple()."""
        self.start_simple("b10-xfrout", c_channel_env)

    def start_xfrin(self, c_channel_env):
        """Start the zone-transfer-in daemon via start_simple()."""
        self.start_simple("b10-xfrin", c_channel_env)

    def start_zonemgr(self, c_channel_env):
        """Start the zone manager via start_simple()."""
        self.start_simple("b10-zonemgr", c_channel_env)

    def start_stats(self, c_channel_env):
        """Start the statistics daemon via start_simple()."""
        self.start_simple("b10-stats", c_channel_env)

    def start_stats_httpd(self, c_channel_env):
        """Start the statistics HTTP frontend via start_simple()."""
        self.start_simple("b10-stats-httpd", c_channel_env)

    def start_dhcp6(self, c_channel_env):
        """Start the DHCPv6 daemon via start_simple()."""
        self.start_simple("b10-dhcp6", c_channel_env)
+
    def start_cmdctl(self, c_channel_env):
        """
        Starts the command control process, passing --port if a cmdctl
        port was configured (also forwarded for the log message).
        """
        args = ["b10-cmdctl"]
        if self.cmdctl_port is not None:
            args.append("--port=" + str(self.cmdctl_port))
        self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
+
    def start_all_processes(self):
        """
        Starts up all the processes.  Any exception generated during the
        starting of the processes is handled by the caller (startup()),
        which kills everything started so far.
        """
        # The socket creator first, as it is the only thing that needs root
        self.start_creator()
        # TODO: Once everything uses the socket creator, we can drop root
        # privileges right now

        c_channel_env = self.c_channel_env
        self.start_msgq(c_channel_env)
        self.start_cfgmgr(c_channel_env)
        self.start_ccsession(c_channel_env)

        # Extract the parameters associated with Bob. This can only be
        # done after the CC Session is started.
        self.read_bind10_config()

        # Continue starting the processes. The authoritative server (if
        # selected):
        if self.cfg_start_auth:
            self.start_auth(c_channel_env)

        # ... and resolver (if selected):
        if self.cfg_start_resolver:
            self.start_resolver(c_channel_env)
            self.started_resolver_family = True

        # Everything after the main components can run as non-root.
        # TODO: this is only temporary - once the privileged socket creator is
        # fully working, nothing else will run as root.
        if self.uid is not None:
            posix.setuid(self.uid)

        # xfrin/xfrout and the zone manager are only meaningful if the
        # authoritative server has been started.
        if self.cfg_start_auth:
            self.start_xfrout(c_channel_env)
            self.start_xfrin(c_channel_env)
            self.start_zonemgr(c_channel_env)
            self.started_auth_family = True

        # ... and finally start the remaining processes
        self.start_stats(c_channel_env)
        self.start_stats_httpd(c_channel_env)
        self.start_cmdctl(c_channel_env)

        # NOTE(review): cfg_start_dhcp6 is not set by read_bind10_config();
        # presumably it comes from __init__ or the config handler — confirm.
        if self.cfg_start_dhcp6:
            self.start_dhcp6(c_channel_env)
+
    def startup(self):
        """
        Start the BoB instance.

        Returns None if successful, otherwise a string describing the
        problem.
        """
        # Try to connect to the c-channel daemon, to see if it is already
        # running
        c_channel_env = {}
        if self.msgq_socket_file is not None:
            c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
        logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
        # try to connect, and if we can't wait a short while
        try:
            self.cc_session = isc.cc.Session(self.msgq_socket_file)
            # A successful connection means another msgq already owns the
            # socket: refuse to start a second instance.
            logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
            return "b10-msgq already running, or socket file not cleaned , cannot start"
        except isc.cc.session.SessionError:
            # this is the case we want, where the msgq is not running
            pass

        # Start all processes. If any one fails to start, kill all started
        # processes and exit with an error indication.
        try:
            self.c_channel_env = c_channel_env
            self.start_all_processes()
        except Exception as e:
            self.kill_started_processes()
            return "Unable to start " + self.curproc + ": " + str(e)

        # Started successfully
        self.runnable = True
        return None
+
    def stop_all_processes(self):
        """Stop all processes by broadcasting 'shutdown' to each module.

        The msgq itself is not addressed here; it is terminated later by
        the signal-based fallback in shutdown().
        """
        cmd = { "command": ['shutdown']}

        self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
        self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
        self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
        self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
        self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
        self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
        self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
        self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
        self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
        # Terminate the creator last
        self.stop_creator()
+
    def stop_process(self, process, recipient):
        """
        Stop the given process, friendly-like. The process is the name it has
        (in logs, etc), the recipient is the address on msgq.
        """
        logger.info(BIND10_STOP_PROCESS, process)
        # TODO: Some timeout to solve processes that don't want to die would
        # help. We can even store it in the dict, it is used only as a set
        # Record the intent so reap_children()/restart_processes() will not
        # resurrect a process we deliberately stopped.
        self.expected_shutdowns[process] = 1
        # Ask the process to die willingly
        self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
                                      recipient)
+
    # Series of stop_process wrappers
    def stop_resolver(self):
        """Ask the resolver to shut down gracefully."""
        self.stop_process('b10-resolver', 'Resolver')

    def stop_auth(self):
        """Ask the authoritative server to shut down gracefully."""
        self.stop_process('b10-auth', 'Auth')

    def stop_xfrout(self):
        """Ask the zone-transfer-out daemon to shut down gracefully."""
        self.stop_process('b10-xfrout', 'Xfrout')

    def stop_xfrin(self):
        """Ask the zone-transfer-in daemon to shut down gracefully."""
        self.stop_process('b10-xfrin', 'Xfrin')

    def stop_zonemgr(self):
        """Ask the zone manager to shut down gracefully."""
        self.stop_process('b10-zonemgr', 'Zonemgr')
+
    def shutdown(self):
        """Stop the BoB instance.

        Escalates in three stages: polite 'shutdown' messages over the
        c-channel, then SIGTERM, then SIGKILL until no children remain.
        """
        logger.info(BIND10_SHUTDOWN)
        # first try using the BIND 10 request to stop
        try:
            self.stop_all_processes()
        except:
            # Deliberate best-effort: the c-channel may already be gone at
            # this point; the signal-based fallbacks below still run.
            pass
        # XXX: some delay probably useful... how much is uncertain
        # I have changed the delay from 0.5 to 1, but sometime it's
        # still not enough.
        time.sleep(1)
        self.reap_children()
        # next try sending a SIGTERM
        processes_to_stop = list(self.processes.values())
        for proc_info in processes_to_stop:
            logger.info(BIND10_SEND_SIGTERM, proc_info.name,
                        proc_info.pid)
            try:
                proc_info.process.terminate()
            except OSError:
                # ignore these (usually ESRCH because the child
                # finally exited)
                pass
        # finally, send SIGKILL (unmaskable termination) until everybody dies
        while self.processes:
            # XXX: some delay probably useful... how much is uncertain
            time.sleep(0.1)
            self.reap_children()
            processes_to_stop = list(self.processes.values())
            for proc_info in processes_to_stop:
                logger.info(BIND10_SEND_SIGKILL, proc_info.name,
                            proc_info.pid)
                try:
                    proc_info.process.kill()
                except OSError:
                    # ignore these (usually ESRCH because the child
                    # finally exited)
                    pass
        logger.info(BIND10_SHUTDOWN_COMPLETE)
+
    def _get_process_exit_status(self):
        """Thin wrapper around non-blocking os.waitpid (overridable, e.g.
        for testing).  Returns (pid, exit_status); pid is 0 when no child
        has exited."""
        return os.waitpid(-1, os.WNOHANG)
+
    def reap_children(self):
        """Check to see if any of our child processes have exited,
        and note this for later handling.

        Exited children move from self.processes to self.dead_processes
        so restart_processes() can resurrect them.  A dead socket creator
        or msgq makes the whole boss non-runnable.
        """
        while True:
            try:
                (pid, exit_status) = self._get_process_exit_status()
            except OSError as o:
                if o.errno == errno.ECHILD: break
                # XXX: should be impossible to get any other error here
                raise
            if pid == 0: break
            if self.sockcreator is not None and self.sockcreator.pid() == pid:
                # This is the socket creator, started and terminated
                # differently. This can't be restarted.
                if self.runnable:
                    logger.fatal(BIND10_SOCKCREATOR_CRASHED)
                    self.sockcreator = None
                    self.runnable = False
            elif pid in self.processes:
                # One of the processes we know about. Get information on it.
                proc_info = self.processes.pop(pid)
                proc_info.restart_schedule.set_run_stop_time()
                self.dead_processes[proc_info.pid] = proc_info

                # Write out message, but only if in the running state:
                # During startup and shutdown, these messages are handled
                # elsewhere.
                if self.runnable:
                    # NOTE(review): os.waitpid never returns None for the
                    # status; this branch presumably exists for test
                    # doubles overriding _get_process_exit_status — confirm.
                    if exit_status is None:
                        logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
                                    proc_info.name, proc_info.pid)
                    else:
                        logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
                                    proc_info.name, proc_info.pid,
                                    exit_status)

                    # Was it a special process?
                    if proc_info.name == "b10-msgq":
                        logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
                        self.runnable = False

                # If we're in 'brittle' mode, we want to shutdown after
                # any process dies.
                if self.brittle:
                    self.runnable = False
            else:
                logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
    def restart_processes(self):
        """
        Restart any dead processes:

        * Returns the time when the next process is ready to be restarted.
        * If the server is shutting down, returns 0.
        * If there are no processes, returns None.

        The values returned can be safely passed into select() as the
        timeout value.
        """
        next_restart = None
        # if we're shutting down, then don't restart
        if not self.runnable:
            return 0
        # otherwise look through each dead process and try to restart
        still_dead = {}
        now = time.time()
        for proc_info in self.dead_processes.values():
            if proc_info.name in self.expected_shutdowns:
                # We don't restart, we wanted it to die
                del self.expected_shutdowns[proc_info.name]
                continue
            restart_time = proc_info.restart_schedule.get_restart_time(now)
            if restart_time > now:
                # Not due yet: remember the earliest pending restart time.
                if (next_restart is None) or (next_restart > restart_time):
                    next_restart = restart_time
                still_dead[proc_info.pid] = proc_info
            else:
                logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
                try:
                    proc_info.respawn()
                    self.processes[proc_info.pid] = proc_info
                    logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
                except:
                    # Best-effort: a failed respawn keeps the process in
                    # the dead list for the next round.
                    still_dead[proc_info.pid] = proc_info
        # remember any processes that refuse to be resurrected
        self.dead_processes = still_dead
        # return the time when the next process is ready to be restarted
        return next_restart
+
# Global variables needed by the signal handlers (reaper/fatal_signal) and
# the main loop; both are assigned in main().
options = None
boss_of_bind = None
+
def reaper(signal_number, stack_frame):
    """No-op SIGCHLD handler.

    The handler itself does nothing: the Python signal machinery has been
    set up (via signal.set_wakeup_fd) to write down a pipe, which wakes up
    the select() call in the main loop.
    """
    return
+
def get_signame(signal_number):
    """Return the symbolic name for a signal number.

    Scans the signal module for SIG* constants (skipping SIG_* handler
    constants, whose fourth character is an underscore) and returns the
    first whose value matches; falls back to "Unknown signal N".
    """
    names = (name for name in dir(signal)
             if name.startswith("SIG") and name[3].isalnum())
    for name in names:
        if getattr(signal, name) == signal_number:
            return name
    return "Unknown signal %d" % signal_number
+
+# XXX: perhaps register atexit() function and invoke that instead
def fatal_signal(signal_number, stack_frame):
    """Handle SIGINT/SIGTERM: log the signal and stop the main loop.

    Clearing boss_of_bind.runnable makes the loop in main() fall through
    to the shutdown sequence.
    """
    logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
    # Restore default SIGCHLD handling so shutdown can reap children
    # without re-entering our handler.
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    # No 'global' declarations needed: we only set an attribute on
    # boss_of_bind (never rebind it), and the old 'global options' was
    # entirely unused.
    boss_of_bind.runnable = False
+
def process_rename(option, opt_str, value, parser):
    """optparse callback: rename this process to 'value' (shown in ps/top).

    The option/opt_str/parser arguments are required by the optparse
    callback signature but are unused here.
    """
    isc.util.process.rename(value)
+
def parse_args(args=None, Parser=OptionParser):
    """
    Function for parsing command line arguments. Returns the
    options object from OptionParser.

    'args' defaults to sys.argv[1:].  The default is resolved inside the
    function (rather than in the signature) so it is evaluated at call
    time; the old 'args=sys.argv[1:]' default captured sys.argv once at
    import time, before any test or caller could adjust it.

    Exits (via parser.error / sys.exit) on an invalid --cmdctl-port value
    or on leftover positional arguments.
    """
    if args is None:
        args = sys.argv[1:]
    parser = Parser(version=VERSION)
    parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
                      type="string", default=None,
                      help="UNIX domain socket file the b10-msgq daemon will use")
    parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
                      default=False, help="disable hot-spot cache in authoritative DNS server")
    parser.add_option("-u", "--user", dest="user", type="string", default=None,
                      help="Change user after startup (must run as root)")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="display more about what is going on")
    parser.add_option("--pretty-name", type="string", action="callback",
                      callback=process_rename,
                      help="Set the process name (displayed in ps, top, ...)")
    parser.add_option("-c", "--config-file", action="store",
                      dest="config_file", default=None,
                      help="Configuration database filename")
    parser.add_option("-p", "--data-path", dest="data_path",
                      help="Directory to search for configuration files",
                      default=None)
    parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
                      default=None, help="Port of command control")
    parser.add_option("--pid-file", dest="pid_file", type="string",
                      default=None,
                      help="file to dump the PID of the BIND 10 process")
    parser.add_option("--brittle", dest="brittle", action="store_true",
                      help="debugging flag: exit if any component dies")

    (options, args) = parser.parse_args(args)

    # Validate the cmdctl port range; parser.error() exits the program.
    if options.cmdctl_port is not None:
        try:
            isc.net.parse.port_parse(options.cmdctl_port)
        except ValueError as e:
            parser.error(e)

    # Positional arguments are not accepted.
    if args:
        parser.print_help()
        sys.exit(1)

    return options
+
def dump_pid(pid_file):
    """
    Dump the PID of the current process to the specified file. If the given
    file is None this function does nothing. If the file already exists,
    the existing content will be removed. If a system error happens in
    creating or writing to the file, the corresponding exception will be
    propagated to the caller.
    """
    if pid_file is None:
        return
    # Use a context manager so the file is closed even if write() raises;
    # the original left the handle open on a write error.
    with open(pid_file, "w") as f:
        f.write('%d\n' % os.getpid())
+
def unlink_pid_file(pid_file):
    """
    Remove the given file, which is basically expected to be the PID file
    created by dump_pid(). The specified file may or may not exist; if it
    doesn't this function does nothing. Other system level errors in removing
    the file will be propagated as the corresponding exception.
    """
    if pid_file is None:
        return
    try:
        os.unlink(pid_file)
    except OSError as error:
        # Compare errno values with !=, not 'is not': identity comparison
        # of ints only works by accident (CPython small-int caching).
        if error.errno != errno.ENOENT:
            raise
+
+
def main():
    """Entry point: parse options, set up signals, run BoB until shutdown."""
    global options
    global boss_of_bind
    # Enforce line buffering on stdout, even when not a TTY
    sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)

    options = parse_args()

    # Check user ID.
    setuid = None
    username = None
    if options.user:
        # Try getting information about the user, assuming UID passed.
        try:
            pw_ent = pwd.getpwuid(int(options.user))
            setuid = pw_ent.pw_uid
            username = pw_ent.pw_name
        except ValueError:
            pass
        except KeyError:
            pass

        # Next try getting information about the user, assuming user name
        # passed.
        # If the information is both a valid user name and user number, we
        # prefer the name because we try it second. A minor point, hopefully.
        try:
            pw_ent = pwd.getpwnam(options.user)
            setuid = pw_ent.pw_uid
            username = pw_ent.pw_name
        except KeyError:
            pass

        if setuid is None:
            logger.fatal(BIND10_INVALID_USER, options.user)
            sys.exit(1)

    # Announce startup.
    logger.info(BIND10_STARTING, VERSION)

    # Create wakeup pipe for signal handlers
    wakeup_pipe = os.pipe()
    signal.set_wakeup_fd(wakeup_pipe[1])

    # Set signal handlers for catching child termination, as well
    # as our own demise.
    signal.signal(signal.SIGCHLD, reaper)
    signal.siginterrupt(signal.SIGCHLD, False)
    signal.signal(signal.SIGINT, fatal_signal)
    signal.signal(signal.SIGTERM, fatal_signal)

    # Block SIGPIPE, as we don't want it to end this process
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)

    # Go bob!
    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
                       options.config_file, options.nocache, options.verbose,
                       setuid, username, options.cmdctl_port, options.brittle)
    startup_result = boss_of_bind.startup()
    if startup_result:
        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
        sys.exit(1)
    logger.info(BIND10_STARTUP_COMPLETE)
    dump_pid(options.pid_file)

    # In our main loop, we check for dead processes or messages
    # on the c-channel.
    wakeup_fd = wakeup_pipe[0]
    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
    while boss_of_bind.runnable:
        # clean up any processes that exited
        boss_of_bind.reap_children()
        next_restart = boss_of_bind.restart_processes()
        if next_restart is None:
            wait_time = None
        else:
            wait_time = max(next_restart - time.time(), 0)

        # select() can raise EINTR when a signal arrives,
        # even if they are resumable, so we have to catch
        # the exception
        try:
            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
                                                  wait_time)
        except select.error as err:
            if err.args[0] == errno.EINTR:
                (rlist, wlist, xlist) = ([], [], [])
            else:
                logger.fatal(BIND10_SELECT_ERROR, err)
                break

        for fd in rlist + xlist:
            if fd == ccs_fd:
                try:
                    boss_of_bind.ccs.check_command()
                except isc.cc.session.ProtocolError:
                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
                    # BUG FIX: main() is a module-level function, so the
                    # original "self.runnable = False" raised NameError
                    # (there is no 'self' here) instead of stopping the
                    # loop.  Clear the flag on the global BoB instance.
                    boss_of_bind.runnable = False
                    break
            elif fd == wakeup_fd:
                os.read(wakeup_fd, 32)

    # shutdown
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    boss_of_bind.shutdown()
    unlink_pid_file(options.pid_file)
    sys.exit(0)
+
+if __name__ == "__main__":
+ main()
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 4020593..bb44ca0 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -23,14 +23,14 @@ BIND10_PATH=@abs_top_builddir@/src/bin/bind10
PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config
+PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bind10/sockcreator.py b/src/bin/bind10/sockcreator.py
new file mode 100644
index 0000000..9fcc74e
--- /dev/null
+++ b/src/bin/bind10/sockcreator.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
"""
Module that communicates with the privileged socket creator (b10-sockcreator).
"""

import socket
import struct
import os
import subprocess

# isc.log was used below (isc.log.Logger) but never imported; it was only
# available by accident through 'from bind10_messages import *'.
import isc.log

from bind10_messages import *
from libutil_io_python import recv_fd

logger = isc.log.Logger("boss")
+
class CreatorError(Exception):
    """
    Exception raised for socket creator related errors.

    Carries two extra attributes, holding the values passed to __init__:
    'fatal' (True when the creator itself is unusable, e.g. it crashed)
    and 'errno' (the errno value returned from the socket creator, or
    None when not applicable).
    """

    def __init__(self, message, fatal, errno=None):
        """Store the usual message plus the fatal flag and errno value."""
        super().__init__(message)
        self.fatal = fatal
        self.errno = errno
+
class Parser:
    """
    This class knows the sockcreator language. It creates commands, sends them
    and receives the answers and parses them.

    It does not start the creator; the communication channel must be provided.

    In theory, anything here can throw a fatal CreatorError exception, but it
    happens only in case something like the creator process crashes. Any other
    occasions are mentioned explicitly.
    """

    def __init__(self, creator_socket):
        """
        Creates the parser. The creator_socket is the socket to the socket
        creator process that will be used for communication. However, the
        object must have a read_fd() method to read the file descriptor. This
        slightly unusual trick with modifying an object is used to easy up
        testing.

        You can use WrappedSocket in production code to add the method to any
        ordinary socket.
        """
        self.__socket = creator_socket
        logger.info(BIND10_SOCKCREATOR_INIT)

    def terminate(self):
        """
        Asks the creator process to terminate and waits for it to close the
        socket. Does not return anything. Raises a CreatorError if there is
        still data on the socket, if there is an error closing the socket,
        or if the socket had already been closed.
        """
        # __socket is set to None on any fatal failure; all entry points
        # check it first so a dead creator is reported consistently.
        if self.__socket is None:
            raise CreatorError('Terminated already', True)
        logger.info(BIND10_SOCKCREATOR_TERMINATE)
        try:
            self.__socket.sendall(b'T')
            # Wait for an EOF - it will return empty data
            eof = self.__socket.recv(1)
            if len(eof) != 0:
                raise CreatorError('Protocol error - data after terminated',
                                   True)
            self.__socket = None
        except socket.error as se:
            self.__socket = None
            raise CreatorError(str(se), True)

    def get_socket(self, address, port, socktype):
        """
        Asks the socket creator process to create a socket. Pass an address
        (the isc.net.IPaddr object), port number and socket type (either
        string "UDP", "TCP" or constant socket.SOCK_DGRAM or
        socket.SOCK_STREAM).

        Blocks until it is provided by the socket creator process (which
        should be fast, as it is on localhost) and returns the file descriptor
        number. It raises a CreatorError exception if the creation fails, and
        ValueError for an unrecognized socket type or address family.
        """
        if self.__socket is None:
            raise CreatorError('Socket requested on terminated creator', True)
        # First, assemble the request from parts
        logger.info(BIND10_SOCKET_GET, address, port, socktype)
        data = b'S'
        if socktype == 'UDP' or socktype == socket.SOCK_DGRAM:
            data += b'U'
        elif socktype == 'TCP' or socktype == socket.SOCK_STREAM:
            data += b'T'
        else:
            raise ValueError('Unknown socket type: ' + str(socktype))
        if address.family == socket.AF_INET:
            data += b'4'
        elif address.family == socket.AF_INET6:
            data += b'6'
        else:
            raise ValueError('Unknown address family in address')
        # Port in network byte order, followed by the raw address bytes.
        data += struct.pack('!H', port)
        data += address.addr
        try:
            # Send the request
            self.__socket.sendall(data)
            answer = self.__socket.recv(1)
            if answer == b'S':
                # Success!
                result = self.__socket.read_fd()
                logger.info(BIND10_SOCKET_CREATED, result)
                return result
            elif answer == b'E':
                # There was an error, read the error as well
                # NOTE(review): recv(1) may return b'' on EOF, which falls
                # into the "unknown cause" branch below rather than a
                # dedicated EOF error; __read_all would be more robust here.
                error = self.__socket.recv(1)
                # Local name 'errno' shadows the stdlib module name (the
                # module is not imported here, so it is harmless but
                # confusing); it holds a 1-tuple from struct.unpack.
                errno = struct.unpack('i',
                                      self.__read_all(len(struct.pack('i',
                                                                      0))))
                if error == b'S':
                    cause = 'socket'
                elif error == b'B':
                    cause = 'bind'
                else:
                    self.__socket = None
                    logger.fatal(BIND10_SOCKCREATOR_BAD_CAUSE, error)
                    raise CreatorError('Unknown error cause' + str(answer), True)
                logger.error(BIND10_SOCKET_ERROR, cause, errno[0],
                             os.strerror(errno[0]))
                raise CreatorError('Error creating socket on ' + cause, False,
                                   errno[0])
            else:
                self.__socket = None
                logger.fatal(BIND10_SOCKCREATOR_BAD_RESPONSE, answer)
                raise CreatorError('Unknown response ' + str(answer), True)
        except socket.error as se:
            self.__socket = None
            logger.fatal(BIND10_SOCKCREATOR_TRANSPORT_ERROR, str(se))
            raise CreatorError(str(se), True)

    def __read_all(self, length):
        """
        Keeps reading until length data is read or EOF or error happens.

        EOF is considered error as well and throws a CreatorError.
        """
        result = b''
        while len(result) < length:
            data = self.__socket.recv(length - len(result))
            if len(data) == 0:
                self.__socket = None
                logger.fatal(BIND10_SOCKCREATOR_EOF)
                raise CreatorError('Unexpected EOF', True)
            result += data
        return result
+
class WrappedSocket:
    """
    Wraps a socket and adds a read_fd() method, so the object can be handed
    to the Parser class conveniently. Every attribute of the underlying
    socket is copied onto this object, and the original socket is kept
    referenced so it is not garbage-collected (and thus closed) while the
    wrapper is still in use.
    """
    def __init__(self, socket):
        # Mirror each attribute of the wrapped socket onto self, skipping
        # the ones that cannot (or must not) be reassigned.
        skip = ('__class__', '__weakref__')
        for attr_name in dir(socket):
            if attr_name in skip:
                continue
            setattr(self, attr_name, getattr(socket, attr_name))
        # Hold a reference so the socket outlives this wrapper's use.
        self.__orig_socket = socket

    def read_fd(self):
        """Receive a file descriptor passed over the wrapped socket."""
        return recv_fd(self.fileno())
+
+# FIXME: Any idea how to test this? Starting an external process doesn't sound
+# OK
class Creator(Parser):
    """
    This starts the socket creator (b10-sockcreator) and allows asking for
    sockets through the inherited Parser interface.
    """
    def __init__(self, path):
        """
        Spawn b10-sockcreator (looked up on the given PATH string) and
        connect to it over a UNIX socket pair.
        """
        (local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
        # Popen does not like, for some reason, having the same socket for
        # stdin as well as stdout, so we dup it before passing it there.
        remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
                                socket.SOCK_STREAM)
        # BUG FIX: copy the environment instead of aliasing os.environ.
        # The original 'env = os.environ' meant the PATH assignment below
        # mutated this (parent) process's environment as a side effect.
        env = dict(os.environ)
        env['PATH'] = path
        self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
                                          stdin=remote.fileno(),
                                          stdout=remote2.fileno())
        remote.close()
        remote2.close()
        Parser.__init__(self, WrappedSocket(local))

    def pid(self):
        """Return the PID of the socket creator process."""
        return self.__process.pid

    def kill(self):
        """Forcibly kill the socket creator process (safe to call twice)."""
        logger.warn(BIND10_SOCKCREATOR_KILL)
        if self.__process is not None:
            self.__process.kill()
            self.__process = None
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 3d8d57a..6d758b3 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -1,14 +1,13 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = bind10_test.py
-EXTRA_DIST = $(PYTESTS)
+PYTESTS = bind10_test.py sockcreator_test.py
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -21,7 +20,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 9d794a6..077190c 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
# XXX: environment tests are currently disabled, due to the preprocessor
# setup that we have now complicating the environment
@@ -193,6 +193,13 @@ class MockBob(BoB):
self.cmdctl = False
self.c_channel_env = {}
self.processes = { }
+ self.creator = False
+
+ def start_creator(self):
+ self.creator = True
+
+ def stop_creator(self, kill=False):
+ self.creator = False
def read_bind10_config(self):
# Configuration options are set directly
@@ -337,6 +344,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.assertEqual(bob.msgq, core)
self.assertEqual(bob.cfgmgr, core)
self.assertEqual(bob.ccsession, core)
+ self.assertEqual(bob.creator, core)
self.assertEqual(bob.auth, auth)
self.assertEqual(bob.resolver, resolver)
self.assertEqual(bob.xfrout, auth)
diff --git a/src/bin/bind10/tests/sockcreator_test.py.in b/src/bin/bind10/tests/sockcreator_test.py.in
new file mode 100644
index 0000000..53e7035
--- /dev/null
+++ b/src/bin/bind10/tests/sockcreator_test.py.in
@@ -0,0 +1,327 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This test file is generated .py.in -> .py just to be in the build dir,
+# same as the rest of the tests. Saves a lot of stuff in makefile.
+
+"""
+Tests for the bind10.sockcreator module.
+"""
+
+import unittest
+import struct
+import socket
+from isc.net.addr import IPAddr
+import isc.log
+from libutil_io_python import send_fd
+from bind10.sockcreator import Parser, CreatorError, WrappedSocket
+
+class FakeCreator:
+ """
+ Class emulating the socket to the socket creator. It can be given expected
+ data to receive (and check) and responses to give to the Parser class
+ during testing.
+ """
+
+ class InvalidPlan(Exception):
+ """
+ Raised when someone wants to recv when sending is planned or vice
+ versa.
+ """
+ pass
+
+ class InvalidData(Exception):
+ """
+ Raised when the data passed to sendall are not the same as expected.
+ """
+ pass
+
+ def __init__(self, plan):
+ """
+ Create the object. The plan variable contains list of expected actions,
+ in form:
+
+ [('r', 'Data to return from recv'), ('s', 'Data expected on sendall'),
+ ('f', 'File descriptor number to return from read_fd'), ('e',
+ None), ...]
+
+ It modifies the array as it goes.
+ """
+ self.__plan = plan
+
+ def __get_plan(self, expected):
+ if len(self.__plan) == 0:
+ raise InvalidPlan('Nothing more planned')
+ (kind, data) = self.__plan[0]
+ if kind == 'e':
+ self.__plan.pop(0)
+ raise socket.error('False socket error')
+ if kind != expected:
+ raise InvalidPlan('Planned ' + kind + ', but ' + expected +
+ 'requested')
+ return data
+
+ def recv(self, maxsize):
+ """
+ Emulate recv. Returns maxsize bytes from the current recv plan. If
+ there are data left from previous recv call, it is used first.
+
+ If no recv is planned, raises InvalidPlan.
+ """
+ data = self.__get_plan('r')
+ result, rest = data[:maxsize], data[maxsize:]
+ if len(rest) > 0:
+ self.__plan[0] = ('r', rest)
+ else:
+ self.__plan.pop(0)
+ return result
+
+ def read_fd(self):
+ """
+ Emulate the reading of file descriptor. Returns one from a plan.
+
+ It raises InvalidPlan if no socket is planned now.
+ """
+ fd = self.__get_plan('f')
+ self.__plan.pop(0)
+ return fd
+
+ def sendall(self, data):
+ """
+ Checks that the data passed are correct according to plan. It raises
+ InvalidData if the data differs or InvalidPlan when sendall is not
+ expected.
+ """
+ planned = self.__get_plan('s')
+ dlen = len(data)
+ prefix, rest = planned[:dlen], planned[dlen:]
+ if prefix != data:
+ raise InvalidData('Expected "' + str(prefix)+ '", got "' +
+ str(data) + '"')
+ if len(rest) > 0:
+ self.__plan[0] = ('s', rest)
+ else:
+ self.__plan.pop(0)
+
+ def all_used(self):
+ """
+ Returns True if the whole plan was consumed.
+ """
+ return len(self.__plan) == 0
+
+class ParserTests(unittest.TestCase):
+ """
+ Testcases for the Parser class.
+
+ A lot of these tests could be done by
+ `with self.assertRaises(CreatorError) as cm`. But some versions of python
+ take the scope wrong and don't work, so we use the primitive way of
+ try-except.
+ """
+ def __terminate(self):
+ creator = FakeCreator([('s', b'T'), ('r', b'')])
+ parser = Parser(creator)
+ self.assertEqual(None, parser.terminate())
+ self.assertTrue(creator.all_used())
+ return parser
+
+ def test_terminate(self):
+ """
+ Test if the command to terminate is correct and it waits for reading the
+ EOF.
+ """
+ self.__terminate()
+
+ def __terminate_raises(self, parser):
+ """
+ Check that terminate() raises a fatal exception.
+ """
+ try:
+ parser.terminate()
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_terminate_error1(self):
+ """
+ Test it reports an exception when there's error terminating the creator.
+ This one raises an error when receiving the EOF.
+ """
+ creator = FakeCreator([('s', b'T'), ('e', None)])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_error2(self):
+ """
+ Test it reports an exception when there's error terminating the creator.
+ This one raises an error when sending data.
+ """
+ creator = FakeCreator([('e', None)])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_error3(self):
+ """
+ Test it reports an exception when there's error terminating the creator.
+ This one sends data when it should have terminated.
+ """
+ creator = FakeCreator([('s', b'T'), ('r', b'Extra data')])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_twice(self):
+ """
+ Test we can't terminate twice.
+ """
+ parser = self.__terminate()
+ self.__terminate_raises(parser)
+
+ def test_crash(self):
+ """
+ Tests that the parser correctly raises exception when it crashes
+ unexpectedly.
+ """
+ creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'')])
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ # Is the exception correct?
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_error(self):
+ """
+ Tests that the parser correctly raises non-fatal exception when
+ the socket can not be created.
+ """
+ # We split the int to see if it can cope with data coming in
+ # different packets
+ intpart = struct.pack('@i', 42)
+ creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'ES' +
+ intpart[:1]), ('r', intpart[1:])])
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ # Is the exception correct?
+ self.assertFalse(ce.fatal)
+ self.assertEqual(42, ce.errno)
+
+ def __error(self, plan):
+ creator = FakeCreator(plan)
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, socket.SOCK_DGRAM)
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ self.assertTrue(ce.fatal)
+
+ def test_error_send(self):
+ self.__error([('e', None)])
+
+ def test_error_recv(self):
+ self.__error([('s', b'SU4\0\0\0\0\0\0'), ('e', None)])
+
+ def test_error_read_fd(self):
+ self.__error([('s', b'SU4\0\0\0\0\0\0'), ('r', b'S'), ('e', None)])
+
+ def __create(self, addr, socktype, encoded):
+ creator = FakeCreator([('s', b'S' + encoded), ('r', b'S'), ('f', 42)])
+ parser = Parser(creator)
+ self.assertEqual(42, parser.get_socket(IPAddr(addr), 42, socktype))
+
+ def test_create1(self):
+ self.__create('192.0.2.0', 'UDP', b'U4\0\x2A\xC0\0\x02\0')
+
+ def test_create2(self):
+ self.__create('2001:db8::', socket.SOCK_STREAM,
+ b'T6\0\x2A\x20\x01\x0d\xb8\0\0\0\0\0\0\0\0\0\0\0\0')
+
+ def test_create_terminated(self):
+ """
+ Test we can't request sockets after it was terminated.
+ """
+ parser = self.__terminate()
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_invalid_socktype(self):
+ """
+ Test invalid socket type is rejected
+ """
+ self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+ IPAddr('0.0.0.0'), 42, 'RAW')
+
+ def test_invalid_family(self):
+ """
+ Test it rejects invalid address family.
+ """
+ # Note: this produces a bad logger output, since this address
+ # can not be converted to string, so the original message with
+ # placeholders is output. This should not happen in practice, so
+ # it is harmless.
+ addr = IPAddr('0.0.0.0')
+ addr.family = 42
+ self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+ addr, 42, socket.SOCK_DGRAM)
+
+class WrapTests(unittest.TestCase):
+ """
+ Tests for the wrap_socket function.
+ """
+ def test_wrap(self):
+ # We construct two pairs of socket. The receiving side of one pair will
+ # be wrapped. Then we send one of the other pair through this pair and
+ # check the received one can be used as a socket
+
+ # The transport socket
+ (t1, t2) = socket.socketpair()
+ # The payload socket
+ (p1, p2) = socket.socketpair()
+
+ t2 = WrappedSocket(t2)
+
+ # Transfer the descriptor
+ send_fd(t1.fileno(), p1.fileno())
+ p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
+
+ # Now, pass some data through the socket
+ p1.send(b'A')
+ data = p2.recv(1)
+ self.assertEqual(b'A', data)
+
+ # Test the wrapping didn't hurt the socket's usual methods
+ t1.send(b'B')
+ data = t2.recv(1)
+ self.assertEqual(b'B', data)
+ t2.send(b'C')
+ data = t1.recv(1)
+ self.assertEqual(b'C', data)
+
+if __name__ == '__main__':
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 0bfcda5..8c2b674 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -398,6 +398,8 @@ class BindCmdInterpreter(Cmd):
print("Error: " + str(dte))
except isc.cc.data.DataNotFoundError as dnfe:
print("Error: " + str(dnfe))
+ except isc.cc.data.DataAlreadyPresentError as dape:
+ print("Error: " + str(dape))
except KeyError as ke:
print("Error: missing " + str(ke))
else:
@@ -634,7 +636,15 @@ class BindCmdInterpreter(Cmd):
# we have more data to show
line += "/"
else:
- line += "\t" + json.dumps(value_map['value'])
+ # if type is named_set, don't print value if None
+ # (it is either {} meaning empty, or None, meaning
+ # there actually is data, but not to be shown with
+ # the current command)
+ if value_map['type'] == 'named_set' and\
+ value_map['value'] is None:
+ line += "/\t"
+ else:
+ line += "\t" + json.dumps(value_map['value'])
line += "\t" + value_map['type']
line += "\t"
if value_map['default']:
@@ -649,10 +659,9 @@ class BindCmdInterpreter(Cmd):
data, default = self.config_data.get_value(identifier)
print(json.dumps(data))
elif cmd.command == "add":
- if 'value' in cmd.params:
- self.config_data.add_value(identifier, cmd.params['value'])
- else:
- self.config_data.add_value(identifier)
+ self.config_data.add_value(identifier,
+ cmd.params.get('value_or_name'),
+ cmd.params.get('value_for_set'))
elif cmd.command == "remove":
if 'value' in cmd.params:
self.config_data.remove_value(identifier, cmd.params['value'])
@@ -679,7 +688,7 @@ class BindCmdInterpreter(Cmd):
except isc.config.ModuleCCSessionError as mcse:
print(str(mcse))
elif cmd.command == "diff":
- print(self.config_data.get_local_changes());
+ print(self.config_data.get_local_changes())
elif cmd.command == "go":
self.go(identifier)
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 01307e9..ee4191d 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -50,17 +50,28 @@ def prepare_config_commands(tool):
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
+ cmd = CommandInfo(name = "add", desc =
+ "Add an entry to configuration list or a named set. "
+ "When adding to a list, the command has one optional argument, "
+ "a value to add to the list. The value must be in correct JSON "
+ "and complete. When adding to a named set, it has one "
+ "mandatory parameter (the name to add), and an optional "
+ "parameter value, similar to when adding to a list. "
+ "In either case, when no value is given, an entry will be "
+ "constructed with default values.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value_or_name", type = "string", optional=True, desc = "Specifies a value to add to the list, or the name when adding to a named set. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+ param = ParamInfo(name = "value_for_set", type = "string", optional=True, desc = "Specifies an optional value to add to the named map. It must be in correct JSON format and complete.")
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
+ cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list or named set.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value", type = "string", optional=True, desc = "When identifier is a list, specifies a value to remove from the list. It must be in correct JSON format and complete. When it is a named set, specifies the name to remove.")
cmd.add_param(param)
module.add_command(cmd)
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index a35284f..4a0e918 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -8,14 +8,14 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
index 61ec009..5ae1f5e 100644
--- a/src/bin/dhcp6/tests/dhcp6_test.py
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index fb9621b..6af383a 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -520,7 +520,8 @@ ResolverImpl::processNormalQuery(const IOMessage& io_message,
const Client client(io_message);
const BasicAction query_action(
getQueryACL().execute(acl::dns::RequestContext(
- client.getRequestSourceIPAddress())));
+ client.getRequestSourceIPAddress(),
+ query_message->getTSIGRecord())));
if (query_action == isc::acl::REJECT) {
LOG_INFO(resolver_logger, RESOLVER_QUERY_REJECTED)
.arg(question->getName()).arg(qtype).arg(qclass).arg(client);
diff --git a/src/bin/resolver/tests/resolver_config_unittest.cc b/src/bin/resolver/tests/resolver_config_unittest.cc
index 698e535..c089041 100644
--- a/src/bin/resolver/tests/resolver_config_unittest.cc
+++ b/src/bin/resolver/tests/resolver_config_unittest.cc
@@ -72,7 +72,8 @@ protected:
IOSocket::getDummyUDPSocket(),
*endpoint));
client.reset(new Client(*query_message));
- request.reset(new RequestContext(client->getRequestSourceIPAddress()));
+ request.reset(new RequestContext(client->getRequestSourceIPAddress(),
+ NULL));
return (*request);
}
void invalidTest(const string &JSON, const string& name);
diff --git a/src/bin/sockcreator/README b/src/bin/sockcreator/README
index 4dbbee7..e142d19 100644
--- a/src/bin/sockcreator/README
+++ b/src/bin/sockcreator/README
@@ -3,7 +3,7 @@ The socket creator
The only thing we need higher rights than standard user is binding sockets to
ports lower than 1024. So we will have a separate process that keeps the
-rights, while the rests drop them for security reasons.
+rights, while the rest drops them for security reasons.
This process is the socket creator. Its goal is to be as simple as possible
and to contain as little code as possible to minimise the amount of code
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index e353a60..62c7708 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -137,7 +137,8 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(rcode.to_text(), "NOTAUTH")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
# NOERROR
- self.xfrsess._tsig_key_ring.add(TSIG_KEY)
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOERROR")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
@@ -172,6 +173,54 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(rcode.to_text(), "NOTAUTH")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
+ # ACL using TSIG: successful case
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
+ ])
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+
+ # ACL using TSIG: key name doesn't match; should be rejected
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ])
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # ACL using TSIG: no TSIG; should be rejected
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ])
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ #
+ # ACL using IP + TSIG: both should match
+ #
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
+ "action": "ACCEPT"},
+ {"action": "REJECT"}
+ ])
+ # both matches
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # TSIG matches, but address doesn't
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Address matches, but TSIG doesn't (not included)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Neither address nor TSIG matches
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
def test_get_query_zone_name(self):
msg = self.getmsg()
self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 2e94369..fe42c54 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -147,7 +147,8 @@ class XfroutSession():
if rcode == Rcode.NOERROR():
# ACL checks
acl_result = self._acl.execute(
- isc.acl.dns.RequestContext(self._remote))
+ isc.acl.dns.RequestContext(self._remote,
+ msg.get_tsig_record()))
if acl_result == DROP:
logger.info(XFROUT_QUERY_DROPPED,
self._get_query_zone_name(msg),
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index f211025..92b7869 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -19,7 +19,7 @@ libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
# DNS specialized one
lib_LTLIBRARIES += libdnsacl.la
-libdnsacl_la_SOURCES = dns.h dns.cc
+libdnsacl_la_SOURCES = dns.h dns.cc dnsname_check.h
libdnsacl_la_LIBADD = libacl.la
libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
index cb948eb..b9cf91f 100644
--- a/src/lib/acl/dns.cc
+++ b/src/lib/acl/dns.cc
@@ -20,15 +20,20 @@
#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/tsigrecord.h>
+
#include <cc/data.h>
#include <acl/dns.h>
#include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
#include <acl/loader.h>
#include <acl/logic_check.h>
using namespace std;
using boost::shared_ptr;
+using namespace isc::dns;
using namespace isc::data;
namespace isc {
@@ -39,9 +44,6 @@ namespace acl {
/// It returns \c true if the remote (source) IP address of the request
/// matches the expression encapsulated in the \c IPCheck, and returns
/// \c false if not.
-///
-/// \note The match logic is expected to be extended as we add
-/// more match parameters (at least there's a plan for TSIG key).
template <>
bool
IPCheck<dns::RequestContext>::matches(
@@ -53,6 +55,18 @@ IPCheck<dns::RequestContext>::matches(
namespace dns {
+/// The specialization of \c NameCheck for access control with
+/// \c RequestContext.
+///
+/// It returns \c true if the request contains a TSIG record and its key
+/// (owner) name is equal to the name stored in the check; otherwise
+/// it returns \c false.
+template<>
+bool
+NameCheck<RequestContext>::matches(const RequestContext& request) const {
+ return (request.tsig != NULL && request.tsig->getName() == name_);
+}
+
vector<string>
internal::RequestCheckCreator::names() const {
// Probably we should eventually build this vector in a more
@@ -60,6 +74,7 @@ internal::RequestCheckCreator::names() const {
// everything.
vector<string> supported_names;
supported_names.push_back("from");
+ supported_names.push_back("key");
return (supported_names);
}
@@ -77,6 +92,10 @@ internal::RequestCheckCreator::create(const string& name,
if (name == "from") {
return (shared_ptr<internal::RequestIPCheck>(
new internal::RequestIPCheck(definition->stringValue())));
+ } else if (name == "key") {
+ return (shared_ptr<internal::RequestKeyCheck>(
+ new internal::RequestKeyCheck(
+ Name(definition->stringValue()))));
} else {
// This case shouldn't happen (normally) as it should have been
// rejected at the loader level. But we explicitly catch the case
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
index 118e5fd..426c961 100644
--- a/src/lib/acl/dns.h
+++ b/src/lib/acl/dns.h
@@ -23,9 +23,13 @@
#include <cc/data.h>
#include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
#include <acl/loader.h>
namespace isc {
+namespace dns {
+class TSIGRecord;
+}
namespace acl {
namespace dns {
@@ -53,9 +57,9 @@ namespace dns {
* used only for a very short period as stated above.
*
* Based on the minimalist philosophy, the initial implementation only
- * maintains the remote (source) IP address of the request. The plan is
- * to add more parameters of the request. A scheduled next step is to
- * support the TSIG key (if it's included in the request). Other possibilities
+ * maintains the remote (source) IP address of the request and (optionally)
+ * the TSIG record included in the request. We may add more parameters of
+ * the request as we see the need for them. Possible additional parameters
* are the local (destination) IP address, the remote and local port numbers,
* various fields of the DNS request (e.g. a particular header flag value).
*/
@@ -68,8 +72,12 @@ struct RequestContext {
/// \exception None
///
/// \parameter remote_address_param The remote IP address
- explicit RequestContext(const IPAddress& remote_address_param) :
- remote_address(remote_address_param)
+ /// \parameter tsig_param A valid pointer to the TSIG record included in
+ /// the request or NULL if the request doesn't contain a TSIG.
+ RequestContext(const IPAddress& remote_address_param,
+ const isc::dns::TSIGRecord* tsig_param) :
+ remote_address(remote_address_param),
+ tsig(tsig_param)
{}
///
@@ -83,6 +91,11 @@ struct RequestContext {
//@{
/// \brief The remote IP address (eg. the client's IP address).
const IPAddress& remote_address;
+
+ /// \brief The TSIG record included in the request message, if any.
+ ///
+ /// If the request doesn't include a TSIG, this member will be NULL.
+ const isc::dns::TSIGRecord* const tsig;
//@}
};
@@ -114,6 +127,7 @@ namespace internal {
// Shortcut typedef
typedef isc::acl::IPCheck<RequestContext> RequestIPCheck;
+typedef isc::acl::dns::NameCheck<RequestContext> RequestKeyCheck;
class RequestCheckCreator : public acl::Loader<RequestContext>::CheckCreator {
public:
diff --git a/src/lib/acl/dnsname_check.h b/src/lib/acl/dnsname_check.h
new file mode 100644
index 0000000..7498d99
--- /dev/null
+++ b/src/lib/acl/dnsname_check.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DNSNAME_CHECK_H
+#define __DNSNAME_CHECK_H 1
+
+#include <dns/name.h>
+
+#include <acl/check.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/// ACL check for DNS names
+///
+/// This class is intended to perform a match between a domain name
+/// specified in an ACL and a given name. The primary usage of this class
+/// is an ACL match for TSIG keys, where an ACL would contain a list of
+/// acceptable key names and the \c match() method would compare the owner
+/// name of a TSIG record against the specified names.
+///
+/// This class could be used for other kinds of names such as the query name
+/// of normal DNS queries.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+template <typename Context>
+class NameCheck : public Check<Context> {
+public:
+ /// The constructor
+ ///
+ /// \exception std::bad_alloc Resource allocation fails in copying the
+ /// name
+ ///
+ /// \param name The domain name to be matched in \c matches().
+ NameCheck(const isc::dns::Name& name) : name_(name) {}
+
+ /// Destructor
+ virtual ~NameCheck() {}
+
+ /// The check method
+ ///
+ /// Matches the passed argument to the condition stored here. Different
+ /// specializations must be provided for different argument types, and the
+ /// program will fail to compile if a required specialisation is not
+ /// provided.
+ ///
+ /// \param context Information to be matched
+ virtual bool matches(const Context& context) const;
+
+ /// Returns the name specified on construction.
+ ///
+ /// This is mainly for testing purposes.
+ ///
+ /// \exception None
+ const isc::dns::Name& getName() const { return (name_); }
+
+private:
+ const isc::dns::Name name_;
+};
+
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+#endif // __DNSNAME_CHECK_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index ce1aec5..6369511 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -16,6 +16,7 @@ run_unittests_SOURCES += acl_test.cc
run_unittests_SOURCES += check_test.cc
run_unittests_SOURCES += dns_test.cc
run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += dnsname_check_unittest.cc
run_unittests_SOURCES += loader_test.cc
run_unittests_SOURCES += logcheck.h
run_unittests_SOURCES += creators.h
@@ -30,6 +31,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
index 3a42af0..b3ddbf4 100644
--- a/src/lib/acl/tests/dns_test.cc
+++ b/src/lib/acl/tests/dns_test.cc
@@ -23,6 +23,11 @@
#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/tsigkey.h>
+#include <dns/tsigrecord.h>
+#include <dns/rdataclass.h>
+
#include <cc/data.h>
#include <acl/dns.h>
#include <acl/loader.h>
@@ -35,6 +40,8 @@
using namespace std;
using boost::scoped_ptr;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
using namespace isc::data;
using namespace isc::acl;
using namespace isc::acl::dns;
@@ -64,8 +71,10 @@ protected:
};
TEST_F(RequestCheckCreatorTest, names) {
- ASSERT_EQ(1, creator_.names().size());
- EXPECT_EQ("from", creator_.names()[0]);
+ const vector<string> names = creator_.names();
+ EXPECT_EQ(2, names.size());
+ EXPECT_TRUE(find(names.begin(), names.end(), "from") != names.end());
+ EXPECT_TRUE(find(names.begin(), names.end(), "key") != names.end());
}
TEST_F(RequestCheckCreatorTest, allowListAbbreviation) {
@@ -93,11 +102,11 @@ TEST_F(RequestCheckCreatorTest, createIPv6Check) {
check_ = creator_.create("from",
Element::fromJSON("\"2001:db8::5300/120\""),
getRequestLoader());
- const dns::internal::RequestIPCheck& ipcheck_ =
+ const dns::internal::RequestIPCheck& ipcheck =
dynamic_cast<const dns::internal::RequestIPCheck&>(*check_);
- EXPECT_EQ(AF_INET6, ipcheck_.getFamily());
- EXPECT_EQ(120, ipcheck_.getPrefixlen());
- const vector<uint8_t> check_address(ipcheck_.getAddress());
+ EXPECT_EQ(AF_INET6, ipcheck.getFamily());
+ EXPECT_EQ(120, ipcheck.getPrefixlen());
+ const vector<uint8_t> check_address(ipcheck.getAddress());
ASSERT_EQ(16, check_address.size());
const uint8_t expected_address[] = { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -106,6 +115,14 @@ TEST_F(RequestCheckCreatorTest, createIPv6Check) {
expected_address));
}
+TEST_F(RequestCheckCreatorTest, createTSIGKeyCheck) {
+ check_ = creator_.create("key", Element::fromJSON("\"key.example.com\""),
+ getRequestLoader());
+ const dns::internal::RequestKeyCheck& keycheck =
+ dynamic_cast<const dns::internal::RequestKeyCheck&>(*check_);
+ EXPECT_EQ(Name("key.example.com"), keycheck.getName());
+}
+
TEST_F(RequestCheckCreatorTest, badCreate) {
// Invalid name
EXPECT_THROW(creator_.create("bad", Element::fromJSON("\"192.0.2.1\""),
@@ -118,12 +135,23 @@ TEST_F(RequestCheckCreatorTest, badCreate) {
EXPECT_THROW(creator_.create("from", Element::fromJSON("[]"),
getRequestLoader()),
isc::data::TypeError);
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("1"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("{}"),
+ getRequestLoader()),
+ isc::data::TypeError);
// Syntax error for IPCheck
EXPECT_THROW(creator_.create("from", Element::fromJSON("\"bad\""),
getRequestLoader()),
isc::InvalidParameter);
+ // Syntax error for Name (key) Check
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("\"bad..name\""),
+ getRequestLoader()),
+ EmptyLabel);
+
// NULL pointer
EXPECT_THROW(creator_.create("from", ConstElementPtr(), getRequestLoader()),
LoaderError);
@@ -140,23 +168,43 @@ protected:
getRequestLoader()));
}
+ // A helper shortcut to create a single Name (key) check for the given
+ // name.
+ ConstRequestCheckPtr createKeyCheck(const string& key_name) {
+ return (creator_.create("key", Element::fromJSON(
+ string("\"") + key_name + string("\"")),
+ getRequestLoader()));
+ }
+
// create a one time request context for a specific test. Note that
// getSockaddr() uses a static storage, so it cannot be called more than
// once in a single test.
- const dns::RequestContext& getRequest4() {
+ const dns::RequestContext& getRequest4(const TSIGRecord* tsig = NULL) {
ipaddr.reset(new IPAddress(tests::getSockAddr("192.0.2.1")));
- request.reset(new dns::RequestContext(*ipaddr));
+ request.reset(new dns::RequestContext(*ipaddr, tsig));
return (*request);
}
- const dns::RequestContext& getRequest6() {
+ const dns::RequestContext& getRequest6(const TSIGRecord* tsig = NULL) {
ipaddr.reset(new IPAddress(tests::getSockAddr("2001:db8::1")));
- request.reset(new dns::RequestContext(*ipaddr));
+ request.reset(new dns::RequestContext(*ipaddr, tsig));
return (*request);
}
+ // create a one time TSIG Record for a specific test. The only parameter
+ // of the record that matters is the key name; others are hardcoded with
+ // arbitrarily chosen values.
+ const TSIGRecord* getTSIGRecord(const string& key_name) {
+ tsig_rdata.reset(new any::TSIG(TSIGKey::HMACMD5_NAME(), 0, 0, 0, NULL,
+ 0, 0, 0, NULL));
+ tsig.reset(new TSIGRecord(Name(key_name), *tsig_rdata));
+ return (tsig.get());
+ }
+
private:
scoped_ptr<IPAddress> ipaddr;
scoped_ptr<dns::RequestContext> request;
+ scoped_ptr<any::TSIG> tsig_rdata;
+ scoped_ptr<TSIGRecord> tsig;
dns::internal::RequestCheckCreator creator_;
};
@@ -184,6 +232,24 @@ TEST_F(RequestCheckTest, checkIPv6) {
EXPECT_FALSE(createIPCheck("32.1.13.184")->matches(getRequest6()));
}
+TEST_F(RequestCheckTest, checkTSIGKey) {
+ EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+ getRequest4(getTSIGRecord("key.example.com"))));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+ getRequest4(getTSIGRecord("badkey.example.com"))));
+
+ // Same for IPv6 (which shouldn't matter)
+ EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+ getRequest6(getTSIGRecord("key.example.com"))));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+ getRequest6(getTSIGRecord("badkey.example.com"))));
+
+ // by default the test request doesn't have a TSIG key, which shouldn't
+ // match any key checks.
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest4()));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest6()));
+}
+
// The following tests test only the creators are registered, they are tested
// elsewhere
diff --git a/src/lib/acl/tests/dnsname_check_unittest.cc b/src/lib/acl/tests/dnsname_check_unittest.cc
new file mode 100644
index 0000000..95b5314
--- /dev/null
+++ b/src/lib/acl/tests/dnsname_check_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+
+#include <acl/dnsname_check.h>
+
+using namespace isc::dns;
+using namespace isc::acl::dns;
+
+// Provide a specialization of the DNSNameCheck::matches() method.
+namespace isc {
+namespace acl {
+namespace dns {
+template <>
+bool NameCheck<Name>::matches(const Name& name) const {
+ return (name_ == name);
+}
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+namespace {
+TEST(DNSNameCheck, construct) {
+ EXPECT_EQ(Name("example.com"),
+ NameCheck<Name>(Name("example.com")).getName());
+
+ // Construct the same check with an explicit trailing dot. This should
+ // yield the same result.
+ EXPECT_EQ(Name("example.com"),
+ NameCheck<Name>(Name("example.com.")).getName());
+}
+
+TEST(DNSNameCheck, match) {
+ NameCheck<Name> check(Name("example.com"));
+ EXPECT_TRUE(check.matches(Name("example.com")));
+ EXPECT_FALSE(check.matches(Name("example.org")));
+
+ // comparison is case insensitive
+ EXPECT_TRUE(check.matches(Name("EXAMPLE.COM")));
+
+ // this is exact match. so super/sub domains don't match
+ EXPECT_FALSE(check.matches(Name("com")));
+ EXPECT_FALSE(check.matches(Name("www.example.com")));
+}
+} // Unnamed namespace
diff --git a/src/lib/cc/data.cc b/src/lib/cc/data.cc
index a455d43..ffa5346 100644
--- a/src/lib/cc/data.cc
+++ b/src/lib/cc/data.cc
@@ -511,6 +511,8 @@ Element::nameToType(const std::string& type_name) {
return (Element::list);
} else if (type_name == "map") {
return (Element::map);
+ } else if (type_name == "named_set") {
+ return (Element::map);
} else if (type_name == "null") {
return (Element::null);
} else if (type_name == "any") {
diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc
index 1621fe3..306c795 100644
--- a/src/lib/config/module_spec.cc
+++ b/src/lib/config/module_spec.cc
@@ -67,10 +67,13 @@ check_config_item(ConstElementPtr spec) {
check_leaf_item(spec, "list_item_spec", Element::map, true);
check_config_item(spec->get("list_item_spec"));
}
- // todo: add stuff for type map
- if (Element::nameToType(spec->get("item_type")->stringValue()) == Element::map) {
+
+ if (spec->get("item_type")->stringValue() == "map") {
check_leaf_item(spec, "map_item_spec", Element::list, true);
check_config_item_list(spec->get("map_item_spec"));
+ } else if (spec->get("item_type")->stringValue() == "named_set") {
+ check_leaf_item(spec, "named_set_item_spec", Element::map, true);
+ check_config_item(spec->get("named_set_item_spec"));
}
}
@@ -286,7 +289,8 @@ check_type(ConstElementPtr spec, ConstElementPtr element) {
return (cur_item_type == "list");
break;
case Element::map:
- return (cur_item_type == "map");
+ return (cur_item_type == "map" ||
+ cur_item_type == "named_set");
break;
}
return (false);
@@ -323,8 +327,20 @@ ModuleSpec::validateItem(ConstElementPtr spec, ConstElementPtr data,
}
}
if (data->getType() == Element::map) {
- if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
- return (false);
+ // either a normal 'map' or a 'named set' (determined by which
+ // subspecification it has)
+ if (spec->contains("map_item_spec")) {
+ if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+ return (false);
+ }
+ } else {
+ typedef std::pair<std::string, ConstElementPtr> maptype;
+
+ BOOST_FOREACH(maptype m, data->mapValue()) {
+ if (!validateItem(spec->get("named_set_item_spec"), m.second, full, errors)) {
+ return (false);
+ }
+ }
}
}
return (true);
diff --git a/src/lib/config/tests/module_spec_unittests.cc b/src/lib/config/tests/module_spec_unittests.cc
index 1b43350..d642af8 100644
--- a/src/lib/config/tests/module_spec_unittests.cc
+++ b/src/lib/config/tests/module_spec_unittests.cc
@@ -211,3 +211,12 @@ TEST(ModuleSpec, CommandValidation) {
EXPECT_EQ(errors->get(0)->stringValue(), "Type mismatch");
}
+
+TEST(ModuleSpec, NamedSetValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec32.spec"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_TRUE(dataTestWithErrors(dd, "data32_1.data", errors));
+ EXPECT_FALSE(dataTest(dd, "data32_2.data"));
+ EXPECT_FALSE(dataTest(dd, "data32_3.data"));
+}
diff --git a/src/lib/config/tests/testdata/Makefile.am b/src/lib/config/tests/testdata/Makefile.am
index 57d1ed3..91d7f04 100644
--- a/src/lib/config/tests/testdata/Makefile.am
+++ b/src/lib/config/tests/testdata/Makefile.am
@@ -22,6 +22,9 @@ EXTRA_DIST += data22_7.data
EXTRA_DIST += data22_8.data
EXTRA_DIST += data22_9.data
EXTRA_DIST += data22_10.data
+EXTRA_DIST += data32_1.data
+EXTRA_DIST += data32_2.data
+EXTRA_DIST += data32_3.data
EXTRA_DIST += spec1.spec
EXTRA_DIST += spec2.spec
EXTRA_DIST += spec3.spec
@@ -53,3 +56,4 @@ EXTRA_DIST += spec28.spec
EXTRA_DIST += spec29.spec
EXTRA_DIST += spec30.spec
EXTRA_DIST += spec31.spec
+EXTRA_DIST += spec32.spec
diff --git a/src/lib/config/tests/testdata/data32_1.data b/src/lib/config/tests/testdata/data32_1.data
new file mode 100644
index 0000000..5695b52
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_1.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": { "foo": 1, "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_2.data b/src/lib/config/tests/testdata/data32_2.data
new file mode 100644
index 0000000..d5b9765
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_2.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": { "foo": "wrongtype", "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_3.data b/src/lib/config/tests/testdata/data32_3.data
new file mode 100644
index 0000000..85f32fe
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_3.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": []
+}
diff --git a/src/lib/config/tests/testdata/spec32.spec b/src/lib/config/tests/testdata/spec32.spec
new file mode 100644
index 0000000..68e774e
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec32.spec
@@ -0,0 +1,19 @@
+{
+ "module_spec": {
+ "module_name": "Spec32",
+ "config_data": [
+ { "item_name": "named_set_item",
+ "item_type": "named_set",
+ "item_optional": false,
+ "item_default": { "a": 1, "b": 2 },
+ "named_set_item_spec": {
+ "item_name": "named_set_element",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 3
+ }
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h
index 03a6967..ccdfa48 100644
--- a/src/lib/datasrc/rbtree.h
+++ b/src/lib/datasrc/rbtree.h
@@ -704,9 +704,9 @@ public:
/// \brief Find with callback and node chain.
///
/// This version of \c find() is specifically designed for the backend
- /// of the \c MemoryZone class, and implements all necessary features
- /// for that purpose. Other applications shouldn't need these additional
- /// features, and should normally use the simpler versions.
+ /// of the \c InMemoryZoneFinder class, and implements all necessary
+ /// features for that purpose. Other applications shouldn't need these
+ /// additional features, and should normally use the simpler versions.
///
/// This version of \c find() calls the callback whenever traversing (on
/// the way from root down the tree) a marked node on the way down through
diff --git a/src/lib/dns/rrtype-placeholder.h b/src/lib/dns/rrtype-placeholder.h
index 1cb028c..dad1b2b 100644
--- a/src/lib/dns/rrtype-placeholder.h
+++ b/src/lib/dns/rrtype-placeholder.h
@@ -22,6 +22,11 @@
#include <exceptions/exceptions.h>
+// Solaris x86 defines DS in <sys/regset.h>, which gets pulled in by Boost
+#if defined(__sun) && defined(DS)
+# undef DS
+#endif
+
namespace isc {
namespace util {
class InputBuffer;
diff --git a/src/lib/python/isc/acl/Makefile.am b/src/lib/python/isc/acl/Makefile.am
index cabc0a3..b1afa15 100644
--- a/src/lib/python/isc/acl/Makefile.am
+++ b/src/lib/python/isc/acl/Makefile.am
@@ -4,10 +4,10 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-python_PYTHON = __init__.py
+python_PYTHON = __init__.py dns.py
pythondir = $(PYTHON_SITEPKG_DIR)/isc/acl
-pyexec_LTLIBRARIES = acl.la dns.la
+pyexec_LTLIBRARIES = acl.la _dns.la
pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/acl
acl_la_SOURCES = acl.cc
@@ -15,14 +15,14 @@ acl_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
acl_la_LDFLAGS = $(PYTHON_LDFLAGS)
acl_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
-dns_la_SOURCES = dns.h dns.cc dns_requestacl_python.h dns_requestacl_python.cc
-dns_la_SOURCES += dns_requestcontext_python.h dns_requestcontext_python.cc
-dns_la_SOURCES += dns_requestloader_python.h dns_requestloader_python.cc
-dns_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
-dns_la_LDFLAGS = $(PYTHON_LDFLAGS)
+_dns_la_SOURCES = dns.h dns.cc dns_requestacl_python.h dns_requestacl_python.cc
+_dns_la_SOURCES += dns_requestcontext_python.h dns_requestcontext_python.cc
+_dns_la_SOURCES += dns_requestloader_python.h dns_requestloader_python.cc
+_dns_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+_dns_la_LDFLAGS = $(PYTHON_LDFLAGS)
# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
# placed after -Wextra defined in AM_CXXFLAGS
-dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+_dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
# Python prefers .so, while some OSes (specifically MacOS) use a different
# suffix for dynamic objects. -module is necessary to work this around.
@@ -30,11 +30,11 @@ acl_la_LDFLAGS += -module
acl_la_LIBADD = $(top_builddir)/src/lib/acl/libacl.la
acl_la_LIBADD += $(PYTHON_LIB)
-dns_la_LDFLAGS += -module
-dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
-dns_la_LIBADD += $(PYTHON_LIB)
+_dns_la_LDFLAGS += -module
+_dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
+_dns_la_LIBADD += $(PYTHON_LIB)
-EXTRA_DIST = acl.py dns.py
+EXTRA_DIST = acl.py _dns.py
EXTRA_DIST += acl_inc.cc
EXTRA_DIST += dnsacl_inc.cc dns_requestacl_inc.cc dns_requestcontext_inc.cc
EXTRA_DIST += dns_requestloader_inc.cc
diff --git a/src/lib/python/isc/acl/_dns.py b/src/lib/python/isc/acl/_dns.py
new file mode 100644
index 0000000..a645a7b
--- /dev/null
+++ b/src/lib/python/isc/acl/_dns.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; The .so version will be installed into the right
+# place at installation time.
+# This helper script is only to find it in the .libs directory when we run
+# as a test or from the build directory.
+
+import os
+import sys
+
+for base in sys.path[:]:
+ bindingdir = os.path.join(base, 'isc/acl/.libs')
+ if os.path.exists(bindingdir):
+ sys.path.insert(0, bindingdir)
+
+from _dns import *
diff --git a/src/lib/python/isc/acl/dns.cc b/src/lib/python/isc/acl/dns.cc
index 351a8b3..eb3b57b 100644
--- a/src/lib/python/isc/acl/dns.cc
+++ b/src/lib/python/isc/acl/dns.cc
@@ -52,7 +52,7 @@ PyMethodDef methods[] = {
PyModuleDef dnsacl = {
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
- "isc.acl.dns",
+ "isc.acl._dns",
dnsacl_doc,
-1,
methods,
@@ -90,7 +90,7 @@ getACLException(const char* ex_name) {
}
PyMODINIT_FUNC
-PyInit_dns(void) {
+PyInit__dns(void) {
PyObject* mod = PyModule_Create(&dnsacl);
if (mod == NULL) {
return (NULL);
diff --git a/src/lib/python/isc/acl/dns.py b/src/lib/python/isc/acl/dns.py
index 8070559..0733bc3 100644
--- a/src/lib/python/isc/acl/dns.py
+++ b/src/lib/python/isc/acl/dns.py
@@ -13,21 +13,61 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# This file is not installed. The log.so is installed into the right place.
-# It is only to find it in the .libs directory when we run as a test or
-# from the build directory.
-# But as nobody gives us the builddir explicitly (and we can't use generation
-# from .in file, as it would put us into the builddir and we wouldn't be found)
-# we guess from current directory. Any idea for something better? This should
-# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
-# Should we look there? Or define something in bind10_config?
-
-import os
-import sys
-
-for base in sys.path[:]:
- bindingdir = os.path.join(base, 'isc/acl/.libs')
- if os.path.exists(bindingdir):
- sys.path.insert(0, bindingdir)
-
-from dns import *
+"""\
+This module provides Python bindings for the C++ classes in the
+isc::acl::dns namespace. Specifically, it defines Python interfaces of
+handling access control lists (ACLs) with DNS related contexts.
+The actual binding is implemented in an effectively hidden module,
+isc.acl._dns; this frontend module exists so that
+the C++ binding code doesn't have to deal with complicated operations
+that could be done in a more straightforward way in native Python.
+
+For further details of the actual module, see the documentation of the
+_dns module.
+"""
+
+import pydnspp
+
+import isc.acl._dns
+from isc.acl._dns import *
+
+class RequestACL(isc.acl._dns.RequestACL):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestACL.
+
+ See the base class documentation for more details.
+ """
+ pass
+
+class RequestLoader(isc.acl._dns.RequestLoader):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestLoader.
+
+ See the base class documentation for more details.
+ """
+ pass
+
+class RequestContext(isc.acl._dns.RequestContext):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestContext.
+
+ See the base class documentation for more details.
+ """
+
+ def __init__(self, remote_address, tsig=None):
+ """Wrapper for the RequestContext constructor.
+
+ Internal implementation details that the users don't have to
+ worry about: To avoid dealing with pydnspp bindings in the C++ code,
+ this wrapper converts the TSIG record in its wire format in the form
+ of byte data, and has the binding re-construct the record from it.
+ """
+ tsig_wire = b''
+ if tsig is not None:
+ if not isinstance(tsig, pydnspp.TSIGRecord):
+ raise TypeError("tsig must be a TSIGRecord, not %s" %
+ tsig.__class__.__name__)
+ tsig_wire = tsig.to_wire(tsig_wire)
+ isc.acl._dns.RequestContext.__init__(self, remote_address, tsig_wire)
+
+ def __str__(self):
+ """Wrap __str__() to convert the module name."""
+ s = isc.acl._dns.RequestContext.__str__(self)
+ return s.replace('<isc.acl._dns', '<isc.acl.dns')
diff --git a/src/lib/python/isc/acl/dns_requestacl_python.cc b/src/lib/python/isc/acl/dns_requestacl_python.cc
index 5e5acea..1c38a30 100644
--- a/src/lib/python/isc/acl/dns_requestacl_python.cc
+++ b/src/lib/python/isc/acl/dns_requestacl_python.cc
@@ -114,7 +114,7 @@ namespace python {
// Most of the functions are not actually implemented and NULL here.
PyTypeObject requestacl_type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "isc.acl.dns.RequestACL",
+ "isc.acl._dns.RequestACL",
sizeof(s_RequestACL), // tp_basicsize
0, // tp_itemsize
RequestACL_destroy, // tp_dealloc
@@ -132,7 +132,7 @@ PyTypeObject requestacl_type = {
NULL, // tp_getattro
NULL, // tp_setattro
NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
RequestACL_doc,
NULL, // tp_traverse
NULL, // tp_clear
diff --git a/src/lib/python/isc/acl/dns_requestcontext_inc.cc b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
index 9e80e1f..f71bc59 100644
--- a/src/lib/python/isc/acl/dns_requestcontext_inc.cc
+++ b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
@@ -5,18 +5,18 @@ DNS request to be checked.\n\
This plays the role of ACL context for the RequestACL object.\n\
\n\
Based on the minimalist philosophy, the initial implementation only\n\
-maintains the remote (source) IP address of the request. The plan is\n\
-to add more parameters of the request. A scheduled next step is to\n\
-support the TSIG key (if it's included in the request). Other\n\
-possibilities are the local (destination) IP address, the remote and\n\
-local port numbers, various fields of the DNS request (e.g. a\n\
-particular header flag value).\n\
+maintains the remote (source) IP address of the request and\n\
+(optionally) the TSIG record included in the request. We may add more\n\
+parameters of the request as we see the need for them. Possible\n\
+additional parameters are the local (destination) IP address, the\n\
+remote and local port numbers, various fields of the DNS request (e.g.\n\
+a particular header flag value).\n\
\n\
-RequestContext(remote_address)\n\
+RequestContext(remote_address, tsig)\n\
\n\
In this initial implementation, the constructor only takes a\n\
remote IP address in the form of a socket address as used in the\n\
- Python socket module.\n\
+ Python socket module, and optionally a pydnspp.TSIGRecord object.\n\
\n\
Exceptions:\n\
isc.acl.ACLError Normally shouldn't happen, but still possible\n\
@@ -25,6 +25,9 @@ RequestContext(remote_address)\n\
\n\
Parameters:\n\
remote_address The remote IP address\n\
+ tsig The TSIG record included in the request message, if any.\n\
+ If the request doesn't include a TSIG, this will be None.\n\
+ If this parameter is omitted None will be assumed.\n\
\n\
";
} // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns_requestcontext_python.cc b/src/lib/python/isc/acl/dns_requestcontext_python.cc
index 6c63b59..7f33f59 100644
--- a/src/lib/python/isc/acl/dns_requestcontext_python.cc
+++ b/src/lib/python/isc/acl/dns_requestcontext_python.cc
@@ -14,7 +14,7 @@
// Enable this if you use s# variants with PyArg_ParseTuple(), see
// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
-//#define PY_SSIZE_T_CLEAN
+#define PY_SSIZE_T_CLEAN
// Python.h needs to be placed at the head of the program file, see:
// http://docs.python.org/py3k/extending/extending.html#a-simple-example
@@ -37,8 +37,16 @@
#include <exceptions/exceptions.h>
+#include <util/buffer.h>
#include <util/python/pycppwrapper_util.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+#include <dns/tsigrecord.h>
+
#include <acl/dns.h>
#include <acl/ip_check.h>
@@ -49,6 +57,8 @@ using namespace std;
using boost::scoped_ptr;
using boost::lexical_cast;
using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
using namespace isc::util::python;
using namespace isc::acl::dns;
using namespace isc::acl::dns::python;
@@ -59,11 +69,39 @@ namespace dns {
namespace python {
struct s_RequestContext::Data {
- // The constructor. Currently it only accepts the information of the
- // request source address, and contains all necessary logic in the body
- // of the constructor. As it's extended we may have refactor it by
- // introducing helper methods.
- Data(const char* const remote_addr, const unsigned short remote_port) {
+ // The constructor.
+ Data(const char* const remote_addr, const unsigned short remote_port,
+ const char* tsig_data, const Py_ssize_t tsig_len)
+ {
+ createRemoteAddr(remote_addr, remote_port);
+ createTSIGRecord(tsig_data, tsig_len);
+ }
+
+ // A convenient type converter from sockaddr_storage to sockaddr
+ const struct sockaddr& getRemoteSockaddr() const {
+ const void* p = &remote_ss;
+ return (*static_cast<const struct sockaddr*>(p));
+ }
+
+ // The remote (source) IP address of the request. Note that it needs
+ // a reference to remote_ss. That's why the latter is stored within
+ // this structure.
+ scoped_ptr<IPAddress> remote_ipaddr;
+
+ // The effective length of remote_ss. It's necessary for getnameinfo()
+ // called from sockaddrToText (__str__ backend).
+ socklen_t remote_salen;
+
+ // The TSIG record included in the request, if any. If the request
+ // doesn't contain a TSIG, this will be NULL.
+ scoped_ptr<TSIGRecord> tsig_record;
+
+private:
+ // A helper method for the constructor that is responsible for constructing
+ // the remote address.
+ void createRemoteAddr(const char* const remote_addr,
+ const unsigned short remote_port)
+ {
struct addrinfo hints, *res;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
@@ -85,20 +123,31 @@ struct s_RequestContext::Data {
remote_ipaddr.reset(new IPAddress(getRemoteSockaddr()));
}
- // A convenient type converter from sockaddr_storage to sockaddr
- const struct sockaddr& getRemoteSockaddr() const {
- const void* p = &remote_ss;
- return (*static_cast<const struct sockaddr*>(p));
- }
-
- // The remote (source) IP address the request. Note that it needs
- // a reference to remote_ss. That's why the latter is stored within
- // this structure.
- scoped_ptr<IPAddress> remote_ipaddr;
+ // A helper method for the constructor that is responsible for constructing
+ // the request TSIG.
+ void createTSIGRecord(const char* tsig_data, const Py_ssize_t tsig_len) {
+ if (tsig_len == 0) {
+ return;
+ }
- // The effective length of remote_ss. It's necessary for getnameinf()
- // called from sockaddrToText (__str__ backend).
- socklen_t remote_salen;
+ // Re-construct the TSIG record from the passed binary. This should
+ // normally succeed because we are generally expected to be called
+ // from the frontend .py, which converts a valid TSIGRecord in its
+ // wire format. If some evil or buggy python program directly calls
+ // us with bogus data, validation in libdns++ will trigger an
+ // exception, which will be caught and converted to a Python exception
+ // in RequestContext_init().
+ isc::util::InputBuffer b(tsig_data, tsig_len);
+ const Name key_name(b);
+ const RRType tsig_type(b.readUint16());
+ const RRClass tsig_class(b.readUint16());
+ const RRTTL ttl(b.readUint32());
+ const size_t rdlen(b.readUint16());
+ const ConstRdataPtr rdata = createRdata(tsig_type, tsig_class, b,
+ rdlen);
+ tsig_record.reset(new TSIGRecord(key_name, tsig_class, ttl,
+ *rdata, 0));
+ }
private:
struct sockaddr_storage remote_ss;
@@ -145,31 +194,41 @@ RequestContext_init(PyObject* po_self, PyObject* args, PyObject*) {
s_RequestContext* const self = static_cast<s_RequestContext*>(po_self);
try {
- // In this initial implementation, the constructor is simply: It
- // takes a single parameter, which should be a Python socket address
- // object. For IPv4, it's ('address test', numeric_port); for IPv6,
+ // In this initial implementation, the constructor is simple: It
+ // takes two parameters. The first parameter should be a Python
+ // socket address object.
+ // For IPv4, it's ('address text', numeric_port); for IPv6,
// it's ('address text', num_port, num_flowid, num_zoneid).
+ // The second parameter is wire-format TSIG record in the form of
+ // Python byte data. If the TSIG isn't included in the request,
+ // its length will be 0.
// Below, we parse the argument in the most straightforward way.
// As the constructor becomes more complicated, we should probably
// make it more structural (for example, we should first retrieve
- // the socket address as a PyObject, and parse it recursively)
+ // the python objects, and parse them recursively)
const char* remote_addr;
unsigned short remote_port;
unsigned int remote_flowinfo; // IPv6 only, unused here
unsigned int remote_zoneid; // IPv6 only, unused here
-
- if (PyArg_ParseTuple(args, "(sH)", &remote_addr, &remote_port) ||
- PyArg_ParseTuple(args, "(sHII)", &remote_addr, &remote_port,
- &remote_flowinfo, &remote_zoneid))
+ const char* tsig_data;
+ Py_ssize_t tsig_len;
+
+ if (PyArg_ParseTuple(args, "(sH)y#", &remote_addr, &remote_port,
+ &tsig_data, &tsig_len) ||
+ PyArg_ParseTuple(args, "(sHII)y#", &remote_addr, &remote_port,
+ &remote_flowinfo, &remote_zoneid,
+ &tsig_data, &tsig_len))
{
- // We need to clear the error in case the first call to PareTuple
+ // We need to clear the error in case the first call to ParseTuple
// fails.
PyErr_Clear();
auto_ptr<s_RequestContext::Data> dataptr(
- new s_RequestContext::Data(remote_addr, remote_port));
- self->cppobj = new RequestContext(*dataptr->remote_ipaddr);
+ new s_RequestContext::Data(remote_addr, remote_port,
+ tsig_data, tsig_len));
+ self->cppobj = new RequestContext(*dataptr->remote_ipaddr,
+ dataptr->tsig_record.get());
self->data_ = dataptr.release();
return (0);
}
@@ -224,7 +283,11 @@ RequestContext_str(PyObject* po_self) {
objss << "<" << requestcontext_type.tp_name << " object, "
<< "remote_addr="
<< sockaddrToText(self->data_->getRemoteSockaddr(),
- self->data_->remote_salen) << ">";
+ self->data_->remote_salen);
+ if (self->data_->tsig_record) {
+ objss << ", key=" << self->data_->tsig_record->getName();
+ }
+ objss << ">";
return (Py_BuildValue("s", objss.str().c_str()));
} catch (const exception& ex) {
const string ex_what =
@@ -248,7 +311,7 @@ namespace python {
// Most of the functions are not actually implemented and NULL here.
PyTypeObject requestcontext_type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "isc.acl.dns.RequestContext",
+ "isc.acl._dns.RequestContext",
sizeof(s_RequestContext), // tp_basicsize
0, // tp_itemsize
RequestContext_destroy, // tp_dealloc
@@ -266,7 +329,7 @@ PyTypeObject requestcontext_type = {
NULL, // tp_getattro
NULL, // tp_setattro
NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
RequestContext_doc,
NULL, // tp_traverse
NULL, // tp_clear
diff --git a/src/lib/python/isc/acl/dns_requestloader_python.cc b/src/lib/python/isc/acl/dns_requestloader_python.cc
index 1ddff4c..ab421c5 100644
--- a/src/lib/python/isc/acl/dns_requestloader_python.cc
+++ b/src/lib/python/isc/acl/dns_requestloader_python.cc
@@ -171,7 +171,7 @@ namespace python {
// Most of the functions are not actually implemented and NULL here.
PyTypeObject requestloader_type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "isc.acl.dns.RequestLoader",
+ "isc.acl._dns.RequestLoader",
sizeof(s_RequestLoader), // tp_basicsize
0, // tp_itemsize
RequestLoader_destroy, // tp_dealloc
@@ -189,7 +189,7 @@ PyTypeObject requestloader_type = {
NULL, // tp_getattro
NULL, // tp_setattro
NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
RequestLoader_doc,
NULL, // tp_traverse
NULL, // tp_clear
diff --git a/src/lib/python/isc/acl/tests/Makefile.am b/src/lib/python/isc/acl/tests/Makefile.am
index 64737d2..87781d7 100644
--- a/src/lib/python/isc/acl/tests/Makefile.am
+++ b/src/lib/python/isc/acl/tests/Makefile.am
@@ -19,7 +19,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/lib/isc/python/acl/.libs:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/isc/python/acl/.libs:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/acl/tests/dns_test.py b/src/lib/python/isc/acl/tests/dns_test.py
index acaf32b..7ee3023 100644
--- a/src/lib/python/isc/acl/tests/dns_test.py
+++ b/src/lib/python/isc/acl/tests/dns_test.py
@@ -15,6 +15,7 @@
import unittest
import socket
+from pydnspp import *
from isc.acl.acl import LoaderError, Error, ACCEPT, REJECT, DROP
from isc.acl.dns import *
@@ -39,12 +40,37 @@ def get_acl_json(prefix):
json[0]["from"] = prefix
return REQUEST_LOADER.load(json)
-def get_context(address):
+# The following two are similar to the previous two, but use a TSIG key name
+# instead of IP prefix.
+def get_tsig_acl(key):
+ return REQUEST_LOADER.load('[{"action": "ACCEPT", "key": "' + \
+ key + '"}]')
+
+def get_tsig_acl_json(key):
+ json = [{"action": "ACCEPT"}]
+ json[0]["key"] = key
+ return REQUEST_LOADER.load(json)
+
+# commonly used TSIG RDATA. For the purpose of ACL checks only the key name
+# matters; other parameters are simply borrowed from some other tests, which
+# can be anything for the purpose of the tests here.
+TSIG_RDATA = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 " + \
+ "300 16 2tra2tra2tra2tra2tra2g== " + \
+ "11621 0 0")
+
+def get_context(address, key_name=None):
'''This is a simple shortcut wrapper for creating a RequestContext
- object with a given IP address. Port number doesn't matter in the test
- (as of the initial implementation), so it's fixed for simplicity.
+ object with a given IP address and optionally TSIG key name.
+ Port number doesn't matter in the test (as of the initial implementation),
+ so it's fixed for simplicity.
+ If key_name is not None, it internally creates a (faked) TSIG record
+ and constructs a context with that key. Note that only the key name
+ matters for the purpose of ACL checks.
'''
- return RequestContext(get_sockaddr(address, 53000))
+ tsig_record = None
+ if key_name is not None:
+ tsig_record = TSIGRecord(Name(key_name), TSIG_RDATA)
+ return RequestContext(get_sockaddr(address, 53000), tsig_record)
# These are commonly used RequestContext object
CONTEXT4 = get_context('192.0.2.1')
@@ -63,6 +89,21 @@ class RequestContextTest(unittest.TestCase):
RequestContext(('2001:db8::1234', 53006,
0, 0)).__str__())
+ # Construct the context from IP address and a TSIG record.
+ tsig_record = TSIGRecord(Name("key.example.com"), TSIG_RDATA)
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001, ' + \
+ 'key=key.example.com.>',
+ RequestContext(('192.0.2.1', 53001),
+ tsig_record).__str__())
+
+ # same with IPv6 address, just in case.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006, ' + \
+ 'key=key.example.com.>',
+ RequestContext(('2001:db8::1234', 53006,
+ 0, 0), tsig_record).__str__())
+
# Unusual case: port number overflows (this constructor allows that,
# although it should be rare anyway; the socket address should
        # normally come from the Python socket module).
@@ -89,7 +130,9 @@ class RequestContextTest(unittest.TestCase):
# not a tuple
self.assertRaises(TypeError, RequestContext, 1)
# invalid number of parameters
- self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), 0)
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), 0, 1)
+ # type error for TSIG
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), tsig=1)
# tuple is not in the form of sockaddr
self.assertRaises(TypeError, RequestContext, (0, 53))
self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 'http'))
@@ -159,10 +202,22 @@ class RequestACLTest(unittest.TestCase):
self.assertRaises(LoaderError, REQUEST_LOADER.load,
[{"action": "ACCEPT", "from": []}])
self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": 1}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": 1}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": {}}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": {}}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
'[{"action": "ACCEPT", "from": "bad"}]')
self.assertRaises(LoaderError, REQUEST_LOADER.load,
[{"action": "ACCEPT", "from": "bad"}])
self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": "bad..name"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": "bad..name"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
'[{"action": "ACCEPT", "from": null}]')
self.assertRaises(LoaderError, REQUEST_LOADER.load,
[{"action": "ACCEPT", "from": None}])
@@ -237,6 +292,28 @@ class RequestACLTest(unittest.TestCase):
self.assertEqual(REJECT, get_acl('32.1.13.184').execute(CONTEXT6))
self.assertEqual(REJECT, get_acl_json('32.1.13.184').execute(CONTEXT6))
+ # TSIG checks, derived from dns_test.cc
+ self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+ execute(get_context('192.0.2.1',
+ 'key.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(get_context('192.0.2.1',
+ 'badkey.example.com')))
+ self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+ execute(get_context('2001:db8::1',
+ 'key.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(get_context('2001:db8::1',
+ 'badkey.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+ execute(CONTEXT4))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(CONTEXT4))
+ self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+ execute(CONTEXT6))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(CONTEXT6))
+
# A bit more complicated example, derived from resolver_config_unittest
acl = REQUEST_LOADER.load('[ {"action": "ACCEPT", ' +
' "from": "192.0.2.1"},' +
diff --git a/src/lib/python/isc/cc/data.py b/src/lib/python/isc/cc/data.py
index ce1bba0..76ef942 100644
--- a/src/lib/python/isc/cc/data.py
+++ b/src/lib/python/isc/cc/data.py
@@ -22,8 +22,22 @@
import json
-class DataNotFoundError(Exception): pass
-class DataTypeError(Exception): pass
+class DataNotFoundError(Exception):
+ """Raised if an identifier does not exist according to a spec file,
+ or if an item is addressed that is not in the current (or default)
+ config (such as a nonexistent list or map element)"""
+ pass
+
+class DataAlreadyPresentError(Exception):
+ """Raised if there is an attemt to add an element to a list or a
+ map that is already present in that list or map (i.e. if 'add'
+ is used when it should be 'set')"""
+ pass
+
+class DataTypeError(Exception):
+ """Raised if there is an attempt to set an element that is of a
+ different type than the type specified in the specification."""
+ pass
def remove_identical(a, b):
"""Removes the values from dict a that are the same as in dict b.
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 06a7f0f..8150729 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -312,7 +312,7 @@ class ModuleCCSession(ConfigData):
module_spec = isc.config.module_spec_from_file(spec_file_name)
module_cfg = ConfigData(module_spec)
module_name = module_spec.get_module_name()
- self._session.group_subscribe(module_name);
+ self._session.group_subscribe(module_name)
# Get the current config for that module now
seq = self._session.group_sendmsg(create_command(COMMAND_GET_CONFIG, { "module_name": module_name }), "ConfigManager")
@@ -327,7 +327,7 @@ class ModuleCCSession(ConfigData):
rcode, value = parse_answer(answer)
if rcode == 0:
if value != None and module_spec.validate_config(False, value):
- module_cfg.set_local_config(value);
+ module_cfg.set_local_config(value)
if config_update_callback is not None:
config_update_callback(value, module_cfg)
@@ -377,7 +377,7 @@ class ModuleCCSession(ConfigData):
if self.get_module_spec().validate_config(False,
value,
errors):
- self.set_local_config(value);
+ self.set_local_config(value)
if self._config_handler:
self._config_handler(value)
else:
@@ -414,8 +414,8 @@ class UIModuleCCSession(MultiConfigData):
self.set_specification(isc.config.ModuleSpec(specs[module]))
def update_specs_and_config(self):
- self.request_specifications();
- self.request_current_config();
+ self.request_specifications()
+ self.request_current_config()
def request_current_config(self):
"""Requests the current configuration from the configuration
@@ -425,47 +425,90 @@ class UIModuleCCSession(MultiConfigData):
raise ModuleCCSessionError("Bad config version")
self._set_current_config(config)
-
- def add_value(self, identifier, value_str = None):
- """Add a value to a configuration list. Raises a DataTypeError
- if the value does not conform to the list_item_spec field
- of the module config data specification. If value_str is
- not given, we add the default as specified by the .spec
- file."""
- module_spec = self.find_spec_part(identifier)
- if (type(module_spec) != dict or "list_item_spec" not in module_spec):
- raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
-
+ def _add_value_to_list(self, identifier, value):
cur_list, status = self.get_value(identifier)
if not cur_list:
cur_list = []
- # Hmm. Do we need to check for duplicates?
- value = None
- if value_str is not None:
- value = isc.cc.data.parse_value_str(value_str)
- else:
+ if value is None:
if "item_default" in module_spec["list_item_spec"]:
value = module_spec["list_item_spec"]["item_default"]
if value is None:
- raise isc.cc.data.DataNotFoundError("No value given and no default for " + str(identifier))
-
+ raise isc.cc.data.DataNotFoundError(
+ "No value given and no default for " + str(identifier))
+
if value not in cur_list:
cur_list.append(value)
self.set_value(identifier, cur_list)
+ else:
+ raise isc.cc.data.DataAlreadyPresentError(value +
+ " already in "
+ + identifier)
+
+ def _add_value_to_named_set(self, identifier, value, item_value):
+ if type(value) != str:
+ raise isc.cc.data.DataTypeError("Name for named_set " +
+ identifier +
+ " must be a string")
+ # fail on both None and empty string
+ if not value:
+ raise isc.cc.data.DataNotFoundError(
+ "Need a name to add a new item to named_set " +
+ str(identifier))
+ else:
+ cur_map, status = self.get_value(identifier)
+ if not cur_map:
+ cur_map = {}
+ if value not in cur_map:
+ cur_map[value] = item_value
+ self.set_value(identifier, cur_map)
+ else:
+ raise isc.cc.data.DataAlreadyPresentError(value +
+ " already in "
+ + identifier)
- def remove_value(self, identifier, value_str):
- """Remove a value from a configuration list. The value string
- must be a string representation of the full item. Raises
- a DataTypeError if the value at the identifier is not a list,
- or if the given value_str does not match the list_item_spec
- """
+ def add_value(self, identifier, value_str = None, set_value_str = None):
+ """Add a value to a configuration list. Raises a DataTypeError
+ if the value does not conform to the list_item_spec field
+ of the module config data specification. If value_str is
+ not given, we add the default as specified by the .spec
+ file. Raises a DataNotFoundError if the given identifier
+ is not specified in the specification as a map or list.
+ Raises a DataAlreadyPresentError if the specified element
+ already exists."""
module_spec = self.find_spec_part(identifier)
- if (type(module_spec) != dict or "list_item_spec" not in module_spec):
- raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
+ if module_spec is None:
+ raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+ # the specified element must be a list or a named_set
+ if 'list_item_spec' in module_spec:
+ value = None
+ # in lists, we might get the value with spaces, making it
+ # the third argument. In that case we interpret both as
+ # one big string meant as the value
+ if value_str is not None:
+ if set_value_str is not None:
+ value_str += set_value_str
+ value = isc.cc.data.parse_value_str(value_str)
+ self._add_value_to_list(identifier, value)
+ elif 'named_set_item_spec' in module_spec:
+ item_name = None
+ item_value = None
+ if value_str is not None:
+ item_name = isc.cc.data.parse_value_str(value_str)
+ if set_value_str is not None:
+ item_value = isc.cc.data.parse_value_str(set_value_str)
+ else:
+ if 'item_default' in module_spec['named_set_item_spec']:
+ item_value = module_spec['named_set_item_spec']['item_default']
+ self._add_value_to_named_set(identifier, item_name,
+ item_value)
+ else:
+ raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named set")
- if value_str is None:
+ def _remove_value_from_list(self, identifier, value):
+ if value is None:
# we are directly removing an list index
id, list_indices = isc.cc.data.split_identifier_list_indices(identifier)
if list_indices is None:
@@ -473,17 +516,52 @@ class UIModuleCCSession(MultiConfigData):
else:
self.set_value(identifier, None)
else:
- value = isc.cc.data.parse_value_str(value_str)
- isc.config.config_data.check_type(module_spec, [value])
cur_list, status = self.get_value(identifier)
- #if not cur_list:
- # cur_list = isc.cc.data.find_no_exc(self.config.data, identifier)
if not cur_list:
cur_list = []
- if value in cur_list:
+ elif value in cur_list:
cur_list.remove(value)
self.set_value(identifier, cur_list)
+ def _remove_value_from_named_set(self, identifier, value):
+ if value is None:
+ raise isc.cc.data.DataNotFoundError("Need a name to remove an item from named_set " + str(identifier))
+ elif type(value) != str:
+ raise isc.cc.data.DataTypeError("Name for named_set " + identifier + " must be a string")
+ else:
+ cur_map, status = self.get_value(identifier)
+ if not cur_map:
+ cur_map = {}
+ if value in cur_map:
+ del cur_map[value]
+ else:
+ raise isc.cc.data.DataNotFoundError(value + " not found in named_set " + str(identifier))
+
+ def remove_value(self, identifier, value_str):
+ """Remove a value from a configuration list or named set.
+ The value string must be a string representation of the full
+ item. Raises a DataTypeError if the value at the identifier
+ is not a list, or if the given value_str does not match the
+ list_item_spec """
+ module_spec = self.find_spec_part(identifier)
+ if module_spec is None:
+ raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+ value = None
+ if value_str is not None:
+ value = isc.cc.data.parse_value_str(value_str)
+
+ if 'list_item_spec' in module_spec:
+ if value is not None:
+ isc.config.config_data.check_type(module_spec['list_item_spec'], value)
+ self._remove_value_from_list(identifier, value)
+ elif 'named_set_item_spec' in module_spec:
+ self._remove_value_from_named_set(identifier, value)
+ else:
+ raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named_set")
+
+
+
def commit(self):
"""Commit all local changes, send them through b10-cmdctl to
the configuration manager"""
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index 1efe4a9..fabd37d 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -145,6 +145,8 @@ def _find_spec_part_single(cur_spec, id_part):
return cur_spec['list_item_spec']
# not found
raise isc.cc.data.DataNotFoundError(id + " not found")
+ elif type(cur_spec) == dict and 'named_set_item_spec' in cur_spec.keys():
+ return cur_spec['named_set_item_spec']
elif type(cur_spec) == list:
for cur_spec_item in cur_spec:
if cur_spec_item['item_name'] == id:
@@ -191,11 +193,14 @@ def spec_name_list(spec, prefix="", recurse=False):
result.extend(spec_name_list(map_el['map_item_spec'], prefix + map_el['item_name'], recurse))
else:
result.append(prefix + name)
+ elif 'named_set_item_spec' in spec:
+ # we added a '/' above, but in this one case we don't want it
+ result.append(prefix[:-1])
else:
for name in spec:
result.append(prefix + name + "/")
if recurse:
- result.extend(spec_name_list(spec[name],name, recurse))
+ result.extend(spec_name_list(spec[name], name, recurse))
elif type(spec) == list:
for list_el in spec:
if 'item_name' in list_el:
@@ -207,7 +212,7 @@ def spec_name_list(spec, prefix="", recurse=False):
else:
raise ConfigDataError("Bad specification")
else:
- raise ConfigDataError("Bad specication")
+ raise ConfigDataError("Bad specification")
return result
class ConfigData:
@@ -255,7 +260,7 @@ class ConfigData:
def get_local_config(self):
"""Returns the non-default config values in a dict"""
- return self.data;
+ return self.data
def get_item_list(self, identifier = None, recurse = False):
"""Returns a list of strings containing the full identifiers of
@@ -412,7 +417,39 @@ class MultiConfigData:
item_id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
id_list = module + "/" + id_prefix + "/" + item_id
id_prefix += "/" + id_part
- if list_indices is not None:
+ part_spec = find_spec_part(self._specifications[module].get_config_spec(), id_prefix)
+ if part_spec['item_type'] == 'named_set':
+ # For named sets, the identifier is partly defined
+ # by which values are actually present, and not
+ # purely by the specification.
+ # So if there is a part of the identifier left,
+ # we need to look up the value, then see if that
+ # contains the next part of the identifier we got
+ if len(id_parts) == 0:
+ if 'item_default' in part_spec:
+ return part_spec['item_default']
+ else:
+ return None
+ id_part = id_parts.pop(0)
+
+ named_set_value, type = self.get_value(id_list)
+ if id_part in named_set_value:
+ if len(id_parts) > 0:
+ # we are looking for the *default* value.
+ # so if not present in here, we need to
+ # lookup the one from the spec
+ rest_of_id = "/".join(id_parts)
+ result = isc.cc.data.find_no_exc(named_set_value[id_part], rest_of_id)
+ if result is None:
+ spec_part = self.find_spec_part(identifier)
+ if 'item_default' in spec_part:
+ return spec_part['item_default']
+ return result
+ else:
+ return named_set_value[id_part]
+ else:
+ return None
+ elif list_indices is not None:
# there's actually two kinds of default here for
# lists; they can have a default value (like an
# empty list), but their elements can also have
@@ -449,7 +486,12 @@ class MultiConfigData:
spec = find_spec_part(self._specifications[module].get_config_spec(), id)
if 'item_default' in spec:
- return spec['item_default']
+ # one special case, named_set
+ if spec['item_type'] == 'named_set':
+ print("is " + id_part + " in named set?")
+ return spec['item_default']
+ else:
+ return spec['item_default']
else:
return None
@@ -493,7 +535,7 @@ class MultiConfigData:
spec_part_list = spec_part['list_item_spec']
list_value, status = self.get_value(identifier)
if list_value is None:
- raise isc.cc.data.DataNotFoundError(identifier)
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
if type(list_value) != list:
# the identifier specified a single element
@@ -509,12 +551,38 @@ class MultiConfigData:
for i in range(len(list_value)):
self._append_value_item(result, spec_part_list, "%s[%d]" % (identifier, i), all)
elif item_type == "map":
+ value, status = self.get_value(identifier)
# just show the specific contents of a map, we are
# almost never interested in just its name
spec_part_map = spec_part['map_item_spec']
self._append_value_item(result, spec_part_map, identifier, all)
+ elif item_type == "named_set":
+ value, status = self.get_value(identifier)
+
+ # show just the one entry, when either the map is empty,
+            # or when this element is not requested specifically
+ if len(value.keys()) == 0:
+ entry = _create_value_map_entry(identifier,
+ item_type,
+ {}, status)
+ result.append(entry)
+ elif not first and not all:
+ entry = _create_value_map_entry(identifier,
+ item_type,
+ None, status)
+ result.append(entry)
+ else:
+ spec_part_named_set = spec_part['named_set_item_spec']
+ for entry in value:
+ self._append_value_item(result,
+ spec_part_named_set,
+ identifier + "/" + entry,
+ all)
else:
value, status = self.get_value(identifier)
+ if status == self.NONE and not spec_part['item_optional']:
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
+
entry = _create_value_map_entry(identifier,
item_type,
value, status)
@@ -569,7 +637,7 @@ class MultiConfigData:
spec_part = spec_part['list_item_spec']
check_type(spec_part, value)
else:
- raise isc.cc.data.DataNotFoundError(identifier)
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
# Since we do not support list diffs (yet?), we need to
# copy the currently set list of items to _local_changes
@@ -579,15 +647,50 @@ class MultiConfigData:
cur_id_part = '/'
for id_part in id_parts:
id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
+ cur_value, status = self.get_value(cur_id_part + id)
+ # Check if the value was there in the first place
+ if status == MultiConfigData.NONE and cur_id_part != "/":
+ raise isc.cc.data.DataNotFoundError(id_part +
+ " not found in " +
+ cur_id_part)
if list_indices is not None:
- cur_list, status = self.get_value(cur_id_part + id)
+ # And check if we don't set something outside of any
+ # list
+ cur_list = cur_value
+ for list_index in list_indices:
+ if list_index >= len(cur_list):
+ raise isc.cc.data.DataNotFoundError("No item " +
+ str(list_index) + " in " + id_part)
+ else:
+ cur_list = cur_list[list_index]
if status != MultiConfigData.LOCAL:
isc.cc.data.set(self._local_changes,
cur_id_part + id,
- cur_list)
+ cur_value)
cur_id_part = cur_id_part + id_part + "/"
isc.cc.data.set(self._local_changes, identifier, value)
-
+
+ def _get_list_items(self, item_name):
+ """This method is used in get_config_item_list, to add list
+ indices and named_set names to the completion list. If
+ the given item_name is for a list or named_set, it'll
+ return a list of those (appended to item_name), otherwise
+ the list will only contain the item_name itself."""
+ spec_part = self.find_spec_part(item_name)
+ if 'item_type' in spec_part and \
+ spec_part['item_type'] == 'named_set':
+ subslash = ""
+ if spec_part['named_set_item_spec']['item_type'] == 'map' or\
+ spec_part['named_set_item_spec']['item_type'] == 'named_set':
+ subslash = "/"
+ values, status = self.get_value(item_name)
+ if len(values) > 0:
+ return [ item_name + "/" + v + subslash for v in values.keys() ]
+ else:
+ return [ item_name ]
+ else:
+ return [ item_name ]
+
def get_config_item_list(self, identifier = None, recurse = False):
"""Returns a list of strings containing the item_names of
the child items at the given identifier. If no identifier is
@@ -598,7 +701,11 @@ class MultiConfigData:
if identifier.startswith("/"):
identifier = identifier[1:]
spec = self.find_spec_part(identifier)
- return spec_name_list(spec, identifier + "/", recurse)
+ spec_list = spec_name_list(spec, identifier + "/", recurse)
+ result_list = []
+ for spec_name in spec_list:
+ result_list.extend(self._get_list_items(spec_name))
+ return result_list
else:
if recurse:
id_list = []
diff --git a/src/lib/python/isc/config/module_spec.py b/src/lib/python/isc/config/module_spec.py
index 6171149..9aa49e0 100644
--- a/src/lib/python/isc/config/module_spec.py
+++ b/src/lib/python/isc/config/module_spec.py
@@ -229,7 +229,7 @@ def _check_item_spec(config_item):
item_type = config_item["item_type"]
if type(item_type) != str:
raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
- if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+ if item_type not in ["integer", "real", "boolean", "string", "list", "map", "named_set", "any"]:
raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
if "item_optional" in config_item:
if type(config_item["item_optional"]) != bool:
@@ -293,6 +293,10 @@ def _validate_type(spec, value, errors):
if errors != None:
errors.append(str(value) + " should be a map")
return False
+ elif data_type == "named_set" and type(value) != dict:
+ if errors != None:
+ errors.append(str(value) + " should be a map")
+ return False
else:
return True
@@ -308,8 +312,16 @@ def _validate_item(spec, full, data, errors):
if not _validate_item(list_spec, full, data_el, errors):
return False
elif type(data) == dict:
- if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
- return False
+ if 'map_item_spec' in spec:
+ if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
+ return False
+ else:
+ named_set_spec = spec['named_set_item_spec']
+ for data_el in data.values():
+ if not _validate_type(named_set_spec, data_el, errors):
+ return False
+ if not _validate_item(named_set_spec, full, data_el, errors):
+ return False
return True
def _validate_spec(spec, full, data, errors):
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index ada0c8a..c820ad9 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -695,6 +695,12 @@ class TestUIModuleCCSession(unittest.TestCase):
fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
return UIModuleCCSession(fake_conn)
+ def create_uccs_named_set(self, fake_conn):
+ module_spec = isc.config.module_spec_from_file(self.spec_file("spec32.spec"))
+ fake_conn.set_get_answer('/module_spec', { module_spec.get_module_name(): module_spec.get_full_spec()})
+ fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
+ return UIModuleCCSession(fake_conn)
+
def test_init(self):
fake_conn = fakeUIConn()
fake_conn.set_get_answer('/module_spec', {})
@@ -715,12 +721,14 @@ class TestUIModuleCCSession(unittest.TestCase):
def test_add_remove_value(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs2(fake_conn)
+
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, 1, "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "no_such_item", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "Spec2/item1", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, 1, "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "no_such_item", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "Spec2/item1", "a")
+
self.assertEqual({}, uccs._local_changes)
uccs.add_value("Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['a', 'b', 'foo']}}, uccs._local_changes)
@@ -730,11 +738,36 @@ class TestUIModuleCCSession(unittest.TestCase):
uccs.remove_value("Spec2/item5", "foo")
uccs.add_value("Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
- uccs.add_value("Spec2/item5", "foo")
+ self.assertRaises(isc.cc.data.DataAlreadyPresentError,
+ uccs.add_value, "Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.remove_value, "Spec2/item5[123]", None)
uccs.remove_value("Spec2/item5[0]", None)
self.assertEqual({'Spec2': {'item5': []}}, uccs._local_changes)
+ def test_add_remove_value_named_set(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_named_set(fake_conn)
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'a': 1, 'b': 2}, value)
+ uccs.add_value("/Spec32/named_set_item", "foo")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'a': 1, 'b': 2, 'foo': 3}, value)
+
+ uccs.remove_value("/Spec32/named_set_item", "a")
+ uccs.remove_value("/Spec32/named_set_item", "foo")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'b': 2}, value)
+
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.set_value,
+ "/Spec32/named_set_item/no_such_item",
+ 4)
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.remove_value, "/Spec32/named_set_item",
+ "no_such_item")
+
def test_commit(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs2(fake_conn)
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index fc1bffa..0dd441d 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -236,6 +236,7 @@ class TestConfigData(unittest.TestCase):
value, default = self.cd.get_value("item6/value2")
self.assertEqual(None, value)
self.assertEqual(False, default)
+ self.assertRaises(isc.cc.data.DataNotFoundError, self.cd.get_value, "item6/no_such_item")
def test_get_default_value(self):
self.assertEqual(1, self.cd.get_default_value("item1"))
@@ -360,7 +361,7 @@ class TestMultiConfigData(unittest.TestCase):
def test_get_current_config(self):
cf = { 'module1': { 'item1': 2, 'item2': True } }
- self.mcd._set_current_config(cf);
+ self.mcd._set_current_config(cf)
self.assertEqual(cf, self.mcd.get_current_config())
def test_get_local_changes(self):
@@ -421,6 +422,17 @@ class TestMultiConfigData(unittest.TestCase):
value = self.mcd.get_default_value("Spec2/no_such_item/asdf")
self.assertEqual(None, value)
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ value = self.mcd.get_default_value("Spec32/named_set_item")
+ self.assertEqual({ 'a': 1, 'b': 2}, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/a")
+ self.assertEqual(1, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/b")
+ self.assertEqual(2, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/no_such_item")
+ self.assertEqual(None, value)
+
def test_get_value(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
self.mcd.set_specification(module_spec)
@@ -544,6 +556,29 @@ class TestMultiConfigData(unittest.TestCase):
maps = self.mcd.get_value_maps("/Spec22/value9")
self.assertEqual(expected, maps)
+ def test_get_value_maps_named_set(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ maps = self.mcd.get_value_maps()
+ self.assertEqual([{'default': False, 'type': 'module',
+ 'name': 'Spec32', 'value': None,
+ 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False},
+ {'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item/a")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item/b")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False}], maps)
+
def test_set_value(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
self.mcd.set_specification(module_spec)
@@ -582,6 +617,24 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list("Spec2", True)
self.assertEqual(['Spec2/item1', 'Spec2/item2', 'Spec2/item3', 'Spec2/item4', 'Spec2/item5', 'Spec2/item6/value1', 'Spec2/item6/value2'], config_items)
+ def test_get_config_item_list_named_set(self):
+ config_items = self.mcd.get_config_item_list()
+ self.assertEqual([], config_items)
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ config_items = self.mcd.get_config_item_list()
+ self.assertEqual(['Spec32'], config_items)
+ config_items = self.mcd.get_config_item_list(None, False)
+ self.assertEqual(['Spec32'], config_items)
+ config_items = self.mcd.get_config_item_list(None, True)
+ self.assertEqual(['Spec32/named_set_item'], config_items)
+ self.mcd.set_value('Spec32/named_set_item', { "aaaa": 4, "aabb": 5, "bbbb": 6})
+ config_items = self.mcd.get_config_item_list("/Spec32/named_set_item", True)
+ self.assertEqual(['Spec32/named_set_item/aaaa',
+ 'Spec32/named_set_item/aabb',
+ 'Spec32/named_set_item/bbbb',
+ ], config_items)
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index a4dcdec..be862c5 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -98,6 +98,9 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(True, self.validate_data("spec22.spec", "data22_6.data"))
self.assertEqual(True, self.validate_data("spec22.spec", "data22_7.data"))
self.assertEqual(False, self.validate_data("spec22.spec", "data22_8.data"))
+ self.assertEqual(True, self.validate_data("spec32.spec", "data32_1.data"))
+ self.assertEqual(False, self.validate_data("spec32.spec", "data32_2.data"))
+ self.assertEqual(False, self.validate_data("spec32.spec", "data32_3.data"))
def validate_command_params(self, specfile_name, datafile_name, cmd_name):
dd = self.read_spec_file(specfile_name);
More information about the bind10-changes
mailing list