BIND 10 trac1039, updated. 5355f8f14648fddf13cda7240530e7b4216da671 Merge branch 'master' into trac1039

BIND 10 source code commits bind10-changes at lists.isc.org
Mon Jun 27 12:59:22 UTC 2011


The branch, trac1039, has been updated
       via  5355f8f14648fddf13cda7240530e7b4216da671 (commit)
       via  dcdabc780fce8d02c9263f8e98f03b29bb4e5210 (commit)
       via  0fad7d4a8557741f953eda9fed1d351a3d9dc5ef (commit)
       via  3e861eb6aec036b3c5a2f6a71c6ff3adbdc9a55a (commit)
       via  502100d7b9cd9d2300e78826a3bddd024ef38a74 (commit)
       via  b477df5d4dbce5b72ebd183b83555f62aa3fcec5 (commit)
       via  701c0d6d7c484c2f46951d23fba47c760363b7e4 (commit)
       via  5d5173ef0cc48d206464b39f696d03bae9daecea (commit)
       via  7cc074aacd5159778111fa4cbdbe1c89e6a4e51b (commit)
       via  dc087934c1a1946cfdcf63b49a70aa0fefe6b282 (commit)
       via  feed2b3537a4e57e4cb55232242c6622d1fcc654 (commit)
       via  fb032e397153a63e4f1bd3b9b7fc1a89c01e7d6f (commit)
       via  d57f30ffe93b7f45aa6492ea1fba5d594adc01df (commit)
       via  690dafd743f765f04b21d3ce15ec0a63da6a53bd (commit)
       via  251a32a1fd1e7be23d59790e57a4b40fbcdceae3 (commit)
       via  6bc6c57d5761ccd2ef65291e81bbfd995b4758a9 (commit)
       via  0f1b7a45520517a40b7b85d57d461e20e81b7aa9 (commit)
       via  885a4ecf9c87b8e3a028b6488b0e6b853365edc8 (commit)
       via  8d5a5b95c85af1f15654fe164f306fe21065ea73 (commit)
       via  77367a5d67709b65afd8689159e5192416326cb7 (commit)
       via  935bd760ed4f39213f8db8eab730bf41dc217da9 (commit)
       via  52adf933c0bed4753a06632b25a46055d23eb655 (commit)
       via  4fe29ae03d1ff8f6d721b42f4bb356702110c4e0 (commit)
       via  e3fa282a59eea69c50dcb9354e568a8503510511 (commit)
       via  58df861a260fdf06b17194e224fb8c1bd03f0392 (commit)
       via  77e3f8cf3f3fe79c7dd5f92f30d70c47b515f4cd (commit)
       via  4a88c75d4d1decc3b3d5518bd12d592c118a7fd5 (commit)
       via  0f4c693c3399bd9ecf2d2a5682fda8ed1eb8158f (commit)
       via  877e89713ad2398b6637b843a22c3b12607fe5bb (commit)
       via  33e08ca107c127d5c158882e7f2e86770a48c572 (commit)
       via  32fb3ad97a7ccc65ef391b84c8f488d4ea71e963 (commit)
       via  04e7fe3f480462d288c17bd121a368b74292cfd3 (commit)
       via  354fcf46bf93f1e2e317043f2998a8b17f22fe04 (commit)
       via  21acce853a4269f0db76dc2768bb7c5107b1b7d4 (commit)
       via  c021505a1a0d6ecb15a8fd1592b94baff6d115f4 (commit)
       via  02aa9813c1f6829bb9089400c5397f3faba7d9e0 (commit)
       via  3017593b63f34c4bc69494be8c80327eaad5d922 (commit)
       via  62bc6cce6fe7343c4ef06c7e690939fd0aa20148 (commit)
       via  c58fa9e4c5aa486bb270681a45a4f0f7e04b4139 (commit)
       via  89324744df3f73de1beaefb9420aeab5f9ff7824 (commit)
       via  f9070aee950581a47c0916cb1f3b48cd4bfcb7f4 (commit)
       via  ea15d26afc9ced4a11aea6733ea3df0969c5618b (commit)
       via  f685e5c06c382180eb1775bce714ea60154b08f2 (commit)
       via  5a19ee14367d9bb796c8e43c034ee9f327052c86 (commit)
       via  f92d30bb55decf9ed4d7cdf10231dfe2913ca11a (commit)
       via  461a9d0a1e896e0a1b676c6873f74404d5ab95c1 (commit)
       via  bc81810505f7263aedb8654d139510058c251626 (commit)
       via  b57c51e6ddfc6770d5c66eab5aeb1a5238e5a7ea (commit)
       via  ddb1b2241fc03a1d08dea42907ee8f859d3b2f46 (commit)
       via  0b838ba0d3c60203a52d1a918333846116e607cb (commit)
       via  f77021d7838e33e1662b42776ccc49be4435b1f2 (commit)
       via  632cd6151b871e060d09a79f6b8a283cc0ab469c (commit)
       via  81a2df84a879ca5cbaaa61dffce5c413d920011d (commit)
       via  59b380d3682bb9fca26cae2c70c6c49934823f01 (commit)
       via  8b2247a6ae88fbf16bfd65852feb0216a4ea4dac (commit)
       via  1b01a9d09e5ecf21ff8bd9cce1c20372846a775c (commit)
       via  735f817c7f66813135b4ef576c117aa424a5bdad (commit)
       via  99522dd887762e71cbf4d895486f0e2f915eabda (commit)
       via  999736efa5e3aaf06949675c4f77e1ef9cd0d71b (commit)
       via  85d5708e2c44e04b1a148610434de2c040d7142b (commit)
       via  fc29e92af2bd2cfe8fa77dd311b9382680fd6324 (commit)
       via  78cffeb00933814658da0867ada0209403946b51 (commit)
       via  9129a474d3289157a4d8eb761383352dbfc2586e (commit)
       via  417893fc06dcd5339e2cd0278a6badbbe847d6c4 (commit)
       via  8e715d5202d79361622e89ef11a0d433558768f8 (commit)
       via  79ec6c320ec5c24036856fd6b589ba8bf8b26ffc (commit)
       via  8f5fafa643f2d908b9e97b6d08aeb55c4b96addf (commit)
       via  01f9c1c0adfb37d11133c87056161f1edfba2672 (commit)
       via  ac7aaa887d827f8bdf1c2881d245cc655c6847b7 (commit)
       via  ebb6493b8ff763d42fe99438c8befe48c381b4aa (commit)
       via  c786a61641a965545c2e304b1c946afdedc6dc1a (commit)
       via  1efa5d9d7f699cc3ee636d4e1b50b3fb3a863180 (commit)
       via  e5251c4886f626e6ef9f6ba82771c0e949e0071f (commit)
       via  aaad42c52aed2c3890378511ecb2f97a3731d23a (commit)
       via  4beebf47805d0c3f80872e8f690f09c1658ae4e2 (commit)
       via  792c8b202cffc8fed726f10b3514523b1fc92469 (commit)
       via  8c624c6644563ed9c4fecec8b0b5f5dd115fe7ef (commit)
       via  d1c7f98e910bd19d21a649386f1a8066e4f41677 (commit)
       via  a90c8a06056300e0f9f5ffdae72b8a2ba26346fc (commit)
       via  30570ab2d917dc6adec02ba272ee50c17124b688 (commit)
       via  59908b70a929baf829202197d6e7ab5a3557da32 (commit)
       via  585d1c63d6d0126607f424571e38a4a60683cf4b (commit)
       via  d335ae50bb855b7b302dab852005385c0227dcfb (commit)
       via  8034dbfe87c45eaa2c0aef0e715b86fa79a7c4e3 (commit)
       via  0ddf0f5fa4d9d18599a1642b9f87caaa1f463c5e (commit)
       via  5a75094dfdd5f2307c4a1669e05db70355b08682 (commit)
       via  df5bad72ac8dac07a038f29823a1938bc9bbe72c (commit)
       via  61b01087195d5d1f875f01c5fd2eac5dc61d012d (commit)
       via  84fcd68d77cc4aba23721e234622c33666e96c49 (commit)
       via  406cb1fd4af84fcbdf8339cf1afdae2cfb3b7946 (commit)
       via  55689c559b3ac60765940d64a5b51007f94bddf7 (commit)
       via  925ac83b98b02abec3f7f2a70b7c83170f851e29 (commit)
       via  3f47015eab1abd9c7193a9e740f794c6a718c9f7 (commit)
       via  4064b389d13d2861083499517f51d89492156099 (commit)
       via  926985d03e3486f1a83615dc2794d310cb2cb520 (commit)
       via  189f58f73fe02cf2729ab26d6ce8ab6469e82a1c (commit)
       via  1ab0f2e8448a20674bfb8d12d463e5b3fec3ac6e (commit)
       via  dcb32f7928972c3ebe66f13a08560a1e19c62866 (commit)
       via  e25099da714a10dd3bc24be0002f9174fb9610c9 (commit)
       via  6c5a9b252b7bc062ed807aff342d0314811b5bde (commit)
       via  8320629b004d5fc8194afb5d277a0d9e01299121 (commit)
       via  75bda54b2b5cdf06f334e72cd554b616a887d1cf (commit)
       via  2b97fc4f4f30bff13b94ad9b25766b4a6b2f6655 (commit)
       via  8bb79638bc658d8e57b15ae1b16d28a08ec06a69 (commit)
       via  81b49bb4d72fdfb5db8d7ad5f9b086c489acdb86 (commit)
       via  eeebde9d81c4bbc4e5388db5cd6148ca3589b91e (commit)
       via  8c5e6268927737a472348d1ff8ecb2201c76b98a (commit)
       via  cda19a7cbc56ddd67c7d19ec7d072a64477d254b (commit)
       via  c65177c8ea0dfba3aaa84ea1bf2583b2d818d23d (commit)
       via  287edb431de6ae5d7106dd4e593a193908b9ba9f (commit)
       via  99d7c21284686ba3d021a6d09938b82ea56de783 (commit)
       via  309b24ff461b623770e950d6ff12654241bdd39b (commit)
       via  52d165984d1a7784a1a6e0a3b845b19559698203 (commit)
       via  67a88d3fd748cc42730e142cbfa79d0b7fb7a813 (commit)
       via  c1ebc31d07e2c04c0158fbd3e7289db650b41c1d (commit)
       via  ad3f4a5e40390f14762648986dae8430760202c2 (commit)
       via  bfd50c768ccf03b2e4f3d3ecbeb5fb344ff79129 (commit)
       via  ae21ebb0f609f8a2aa8ffc3d4b84c465111ec2c3 (commit)
       via  7cf66b7e44e389205ae4344764fbf136550854ce (commit)
       via  0c3b69c6e170bee7dd775090af2bdd1cae900080 (commit)
       via  f5edd310465966137f0cd4e2109d90f7e5d5965f (commit)
       via  73ac6b09eeeebcdb03965076d4aa8a8a7a361ebe (commit)
       via  86a307f08882d02ad443e848e096a30ca14ec918 (commit)
      from  6f7998f9a209e9dd7b3ac80793098dfd81b489b9 (commit)

Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
commit 5355f8f14648fddf13cda7240530e7b4216da671
Merge: 6f7998f9a209e9dd7b3ac80793098dfd81b489b9 dcdabc780fce8d02c9263f8e98f03b29bb4e5210
Author: Stephen Morris <stephen at isc.org>
Date:   Mon Jun 27 13:47:39 2011 +0100

    Merge branch 'master' into trac1039

commit dcdabc780fce8d02c9263f8e98f03b29bb4e5210
Author: Jelte Jansen <jelte at isc.org>
Date:   Mon Jun 27 14:26:33 2011 +0200

    [master] update changelog

commit 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef
Merge: 3e861eb6aec036b3c5a2f6a71c6ff3adbdc9a55a 701c0d6d7c484c2f46951d23fba47c760363b7e4
Author: Jelte Jansen <jelte at isc.org>
Date:   Mon Jun 27 14:18:22 2011 +0200

    Merge branch 'trac1004'

commit 3e861eb6aec036b3c5a2f6a71c6ff3adbdc9a55a
Author: Stephen Morris <stephen at isc.org>
Date:   Mon Jun 27 13:05:34 2011 +0100

    [master] ChangeLog entry for trac 1012

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |   19 +
 doc/guide/Makefile.am                              |   18 +-
 doc/guide/bind10-guide.html                        |   90 +-
 doc/guide/bind10-guide.xml                         |   86 +
 doc/guide/bind10-messages.html                     |  841 ++++++++
 doc/guide/bind10-messages.xml                      | 2018 ++++++++++++++++++++
 src/bin/auth/Makefile.am                           |   14 +-
 .../config/config_log.cc => bin/auth/auth_log.cc}  |   10 +-
 src/bin/auth/auth_log.h                            |   54 +
 src/bin/auth/auth_messages.mes                     |  260 +++
 src/bin/auth/auth_srv.cc                           |  173 +--
 src/bin/auth/auth_srv.h                            |   21 -
 src/bin/auth/benchmarks/Makefile.am                |    3 +
 src/bin/auth/command.cc                            |   37 +-
 src/bin/auth/main.cc                               |   51 +-
 src/bin/auth/statistics.cc                         |   35 +-
 src/bin/auth/statistics.h                          |    7 +-
 src/bin/auth/tests/Makefile.am                     |    4 +
 src/bin/auth/tests/auth_srv_unittest.cc            |    9 -
 src/bin/auth/tests/statistics_unittest.cc          |    3 +-
 src/bin/cfgmgr/plugins/b10logging.py               |   19 +-
 src/bin/cfgmgr/plugins/tests/Makefile.am           |    2 +-
 src/bin/cfgmgr/plugins/tests/logging_test.py       |  135 ++
 src/bin/xfrin/Makefile.am                          |   11 +-
 src/bin/xfrin/xfrin.py.in                          |   60 +-
 src/bin/xfrin/xfrin_messages.mes                   |   91 +
 src/lib/acl/Makefile.am                            |    6 +-
 src/lib/acl/ip_check.cc                            |  111 ++
 src/lib/acl/ip_check.h                             |  354 ++++
 src/lib/acl/loader.h                               |   64 +-
 src/lib/acl/logic_check.h                          |  206 ++
 src/lib/acl/tests/Makefile.am                      |   10 +-
 src/lib/acl/tests/creators.h                       |  154 ++
 src/lib/acl/tests/ip_check_unittest.cc             |  588 ++++++
 src/lib/acl/tests/loader_test.cc                   |  209 +--
 src/lib/acl/tests/logcheck.h                       |    5 +
 src/lib/acl/tests/logic_check_test.cc              |  208 ++
 src/lib/acl/tests/run_unittests.cc                 |    3 +-
 src/lib/asiodns/Makefile.am                        |   12 +-
 .../asiodns/{asiodef.mes => asiodns_messages.mes}  |   36 +-
 src/lib/asiodns/io_fetch.cc                        |   28 +-
 src/lib/cc/data.h                                  |    2 +-
 src/lib/config/Makefile.am                         |   12 +-
 src/lib/config/ccsession.cc                        |   60 +-
 src/lib/config/ccsession.h                         |   37 +-
 src/lib/config/config_log.h                        |    2 +-
 .../config/{configdef.mes => config_messages.mes}  |   64 +-
 src/lib/config/tests/ccsession_unittests.cc        |   62 +
 src/lib/datasrc/Makefile.am                        |   12 +-
 .../{messagedef.mes => datasrc_messages.mes}       |  683 ++++----
 src/lib/datasrc/logger.h                           |    2 +-
 tools/system_messages.py                           |  413 ++++
 52 files changed, 6523 insertions(+), 891 deletions(-)
 create mode 100644 doc/guide/bind10-messages.html
 create mode 100644 doc/guide/bind10-messages.xml
 copy src/{lib/config/config_log.cc => bin/auth/auth_log.cc} (83%)
 create mode 100644 src/bin/auth/auth_log.h
 create mode 100644 src/bin/auth/auth_messages.mes
 create mode 100644 src/bin/cfgmgr/plugins/tests/logging_test.py
 create mode 100644 src/bin/xfrin/xfrin_messages.mes
 create mode 100644 src/lib/acl/ip_check.cc
 create mode 100644 src/lib/acl/ip_check.h
 create mode 100644 src/lib/acl/logic_check.h
 create mode 100644 src/lib/acl/tests/creators.h
 create mode 100644 src/lib/acl/tests/ip_check_unittest.cc
 create mode 100644 src/lib/acl/tests/logic_check_test.cc
 rename src/lib/asiodns/{asiodef.mes => asiodns_messages.mes} (67%)
 rename src/lib/config/{configdef.mes => config_messages.mes} (54%)
 rename src/lib/datasrc/{messagedef.mes => datasrc_messages.mes} (69%)
 create mode 100644 tools/system_messages.py

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 3b1d518..c1ff9d0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,22 @@
+263.	[func]      jelte
+	Logging configuration can now also accept a * as a first-level
+	name (e.g. '*', or '*.cache'), indicating that every module
+	should use that configuration, unless overridden by an explicit
+	logging configuration for that module
+	(Trac 1004, git 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef)
+
+262.	[func]      stephen
+	Add some initial documentation about the logging framework.
+	Provide BIND 10 Messages Manual in HTML and DocBook XML formats.
+	This provides all the log message descriptions in a single document.
+	A developer tool, tools/system_messages.py (available in git repo),
+	was written to generate this.
+	(Trac 1012, git 502100d7b9cd9d2300e78826a3bddd024ef38a74)
+
+261.	[func]      stephen
+	Add new-style logging messages to b10-auth.
+	(Trac 738, git c021505a1a0d6ecb15a8fd1592b94baff6d115f4)
+
 260.	[func]      stephen
 	Remove comma between message identification and the message
 	text in the new-style logging messages.
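
As a rough illustration of the behaviour described in ChangeLog entry 263, the
following Python sketch shows one way a '*' first-level logger name could be
resolved against a module name. It is not the BIND 10 implementation; the
function name, lookup order and example values are assumptions.

    # Sketch only: one possible resolution order for wildcard logger names,
    # as described in ChangeLog entry 263.  An explicit configuration for a
    # module overrides a '*' entry; names and order here are assumptions.
    def select_logger_config(name, configs):
        """Pick the configuration that best matches logger 'name'.

        'configs' maps logger names such as 'Resolver', '*.cache' or '*'
        to configuration values (e.g. a severity string).
        """
        candidates = [name]
        if '.' in name:
            module, component = name.split('.', 1)
            candidates.append('*.' + component)   # e.g. '*.cache'
            candidates.append(module)
        candidates.append('*')
        for candidate in candidates:
            if candidate in configs:
                return configs[candidate]
        return None

    # Hypothetical example values:
    configs = {'*': 'INFO', 'Resolver': 'DEBUG'}
    assert select_logger_config('Resolver', configs) == 'DEBUG'
    assert select_logger_config('Auth', configs) == 'INFO'
    assert select_logger_config('Auth.cache', configs) == 'INFO'
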
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index c790139..c84ad06 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -1,10 +1,12 @@
 EXTRA_DIST = bind10-guide.css
-EXTRA_DIST += bind10-guide.html
-EXTRA_DIST += bind10-guide.xml
+EXTRA_DIST += bind10-guide.xml bind10-guide.html
+EXTRA_DIST += bind10-messages.xml bind10-messages.html
 
 # This is not a "man" manual, but reuse this for now for docbook.
 if ENABLE_MAN
 
+.PHONY: bind10-messages.xml
+
 bind10-guide.html: bind10-guide.xml
 	xsltproc --novalid --xinclude --nonet \
 		--path $(top_builddir)/doc \
@@ -13,4 +15,16 @@ bind10-guide.html: bind10-guide.xml
 		http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
 		$(srcdir)/bind10-guide.xml
 
+bind10-messages.html: bind10-messages.xml
+	xsltproc --novalid --xinclude --nonet \
+		--path $(top_builddir)/doc \
+		-o $@ \
+		--stringparam html.stylesheet $(srcdir)/bind10-guide.css \
+		http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
+		$(srcdir)/bind10-messages.xml
+
+# So many dependencies that it's easiest just to regenerate it every time
+bind10-messages.xml:
+	$(PYTHON) $(top_srcdir)/tools/system_messages.py -o $@ $(top_srcdir)
+
 endif
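
The two rules added to doc/guide/Makefile.am above can also be exercised by
hand. The Python sketch below simply shells out to the same commands; the
xsltproc options and the system_messages.py invocation are taken from the
rules above, while the wrapper itself, the directory constants and the
interpreter name are assumptions.

    # Sketch only: runs the two documentation rules shown above by hand.
    # The xsltproc options and the system_messages.py call mirror the
    # Makefile text; the wrapper, paths and interpreter name are assumptions.
    import subprocess

    SRCDIR = "."                 # assumed to be the top of a BIND 10 checkout
    DOCDIR = "doc/guide"
    STYLESHEET = ("http://docbook.sourceforge.net/release/xsl/current/"
                  "html/docbook.xsl")

    def build_messages_manual():
        # bind10-messages.xml is regenerated every time (.PHONY above).
        subprocess.check_call(
            ["python", "tools/system_messages.py",
             "-o", DOCDIR + "/bind10-messages.xml", SRCDIR])
        # bind10-messages.html uses the same xsltproc invocation as the rule.
        subprocess.check_call(
            ["xsltproc", "--novalid", "--xinclude", "--nonet",
             "--path", "doc",
             "-o", DOCDIR + "/bind10-messages.html",
             "--stringparam", "html.stylesheet",
             DOCDIR + "/bind10-guide.css",
             STYLESHEET, DOCDIR + "/bind10-messages.xml"])

    if __name__ == "__main__":
        build_messages_manual()
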
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5b127f3..5754cf0 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,12 +1,12 @@
 <html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
 e guide for BIND 10 version
-        20110519.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+        20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
 	Internet Systems Consortium (ISC). It includes DNS libraries
 	and modular components for controlling authoritative and
 	recursive DNS servers.
       </p><p>
         This is the reference guide for BIND 10 version 20110519.
 	The most up-to-date version of this document, along with
-	other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.  </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284848">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
 stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285037">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285057">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285117">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285216">Build</a></span></dt><dt><span class="section"><a href="#id1168230285230">Install</a></span></dt><dt><span class="section"><a href="#id1168230285254">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
 ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285829">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285894">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285924">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
 /a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286380">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+	other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.  </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
 stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
 ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
 /a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
       BIND is the popular implementation of a DNS server, developer
       interfaces, and DNS tools.
       BIND 10 is a rewrite of BIND 9.  BIND 10 is written in C++ and Python
@@ -33,10 +33,8 @@
         3.1 is the minimum version which will work.
       </p><p>
 	BIND 10 uses the Botan crypto library for C++. It requires
-	at least Botan version 1.8.
-      </p><p>
-	BIND 10 uses the log4cplus C++ logging library. It requires
-	at least log4cplus version 1.0.3.
+	at least Botan version 1.8. To build BIND 10, install the
+	Botan libraries and development include headers.
       </p><p>
 	The authoritative server requires SQLite 3.3.9 or newer.
 	The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -138,10 +136,7 @@
       and, of course, DNS. These include detailed developer
       documentation and code examples.
 
-    </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284848">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285037">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285057">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285117">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285216">Build</a></span></dt><dt><span class="section"><a href="#id1168230285230">Install</a></span></dt><dt><span class="section"><a href="#id1168230285254">Install Hierarchy<
 /a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284848"></a>Building Requirements</h2></div></div></div><p>
-          In addition to the run-time requirements, building BIND 10
-          from source code requires various development include headers.
-        </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+    </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy<
 /a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
             Some operating systems have split their distribution packages into
             a run-time and a development package.  You will need to install
             the development package versions, which include header files and
@@ -152,11 +147,6 @@
   
   
         </p><p>
-	  To build BIND 10, also install the Botan (at least version
-	  1.8) and the log4cplus (at least version 1.0.3)
-          development include headers.
-        </p><p>
-
 	  The Python Library and Python _sqlite3 module are required to
           enable the Xfrout and Xfrin support.
         </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -166,7 +156,7 @@
           Building BIND 10 also requires a C++ compiler and
           standard development headers, make, and pkg-config.
           BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
-          4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
+          4.1.3, 4.2.1, 4.3.2, and 4.4.1.
         </p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
           This quickly covers the standard steps for installing
           and deploying BIND 10 as an authoritative name server using
@@ -202,14 +192,14 @@
         the Git code revision control system or as a downloadable
         tar file. It may also be available in pre-compiled ready-to-use
         packages from operating system vendors.
-      </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285037"></a>Download Tar File</h3></div></div></div><p>
+      </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
           Downloading a release tar file is the recommended method to
           obtain the source code.
         </p><p>
           The BIND 10 releases are available as tar file downloads from
           <a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
           Periodic development snapshots may also be available.
-        </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285057"></a>Retrieve from Git</h3></div></div></div><p>
+        </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
           Downloading this "bleeding edge" code is recommended only for
           developers or advanced users.  Using development code in a production
           environment is not recommended.
@@ -243,7 +233,7 @@
           <span class="command"><strong>autoheader</strong></span>,
           <span class="command"><strong>automake</strong></span>,
           and related commands.
-        </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285117"></a>Configure before the build</h3></div></div></div><p>
+        </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
           BIND 10 uses the GNU Build System to discover build environment
           details.
           To generate the makefiles using the defaults, simply run:
@@ -274,16 +264,16 @@
         </p><p>
           If the configure fails, it may be due to missing or old
           dependencies.
-        </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285216"></a>Build</h3></div></div></div><p>
+        </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
     After the configure step is complete, to build the executables
     from the C++ code and prepare the Python scripts, run:
 
           </p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
-        </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285230"></a>Install</h3></div></div></div><p>
+        </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
           To install the BIND 10 executables, support files,
           and documentation, run:
           </p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
-        </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285254"></a>Install Hierarchy</h3></div></div></div><p>
+        </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
           The following is the layout of the complete BIND 10 installation:
           </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
                 <code class="filename">bin/</code> —
@@ -500,12 +490,12 @@ shutdown
       the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
       channel) the configuration on to the specified module.
     </p><p>
-    </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285829">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285894">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285924">Loading Master Zones Files</a></span></dt></dl></div><p>
+    </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
       The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
       It supports EDNS0 and DNSSEC. It supports IPv6.
       Normally it is started by the <span class="command"><strong>bind10</strong></span> master
       process.
-    </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285829"></a>Server Configurations</h2></div></div></div><p>
+    </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
         <span class="command"><strong>b10-auth</strong></span> is configured via the
         <span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
         The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -525,7 +515,7 @@ This may be a temporary setting until then.
         </p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
               </dd></dl></div><p>
 
-      </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285894"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+      </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
         supports a SQLite3 data source backend and in-memory data source
         backend.
@@ -539,7 +529,7 @@ This may be a temporary setting until then.
         The default is <code class="filename">/usr/local/var/</code>.)
   This data file location may be changed by defining the
   <span class="quote">“<span class="quote">database_file</span>”</span> configuration.
-      </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285924"></a>Loading Master Zones Files</h2></div></div></div><p>
+      </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
        RFC 1035 style DNS master zone files may be imported
         into a BIND 10 data source by using the
         <span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -617,7 +607,7 @@ This may be a temporary setting until then.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
      Access control (such as allowing notifies) is not yet provided.
      The primary/secondary service is not yet complete.
-    </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286380">Forwarding</a></span></dt></dl></div><p>
+    </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
       The <span class="command"><strong>b10-resolver</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
 
@@ -646,7 +636,7 @@ This may be a temporary setting until then.
 > <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
 > <strong class="userinput"><code>config commit</code></strong>
 </pre><p>
-    </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286380"></a>Forwarding</h2></div></div></div><p>
+    </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
 
         To enable forwarding, the upstream address and port must be
         configured to forward queries to, such as:
@@ -694,4 +684,48 @@ This may be a temporary setting until then.
     "stats.timestamp": 1295543046.823504
 }
        </pre><p>
+    </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
+        Each message written by BIND 10 to the configured logging destinations
+        comprises a number of components that identify the origin of the
+        message and, if the message indicates a problem, information about the
+        problem that may be useful in fixing it.
+    </p><p>
+        Consider the message below logged to a file:
+        </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+    ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
+    </p><p>
+      Note: the layout of messages written to the system logging
+      file (syslog) may be slightly different.  This message has
+      been split across two lines here for display reasons; in the
+      logging file, it will appear on one line.
+    </p><p>
+      The log message comprises a number of components:
+
+        </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+            The date and time at which the message was generated.
+        </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+            The severity of the message.
+        </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+	    The source of the message.  This comprises two components:
+	    the BIND 10 process generating the message (in this
+	    case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+	    within the program from which the message originated
+	    (which in the example is the asynchronous I/O link
+	    module, asiolink).
+        </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+	    The message identification.  Every message in BIND 10
+	    has a unique identification, which can be used as an
+	    index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
+	    Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
+        </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+            A brief description of the cause of the problem.  Within this text,
+            information relating to the condition that caused the message to
+            be logged will be included.  In this example, error number 111
+            (an operating system-specific error number) was encountered when
+            trying to open a TCP connection to port 53 on the local system
+            (address 127.0.0.1).  The next step would be to find out the reason
+            for the failure by consulting your system's documentation to
+            identify what error number 111 means.
+        </p></dd></dl></div><p>
+
     </p></div></div></body></html>
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index eb6fea0..7d1a006 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -1450,6 +1450,92 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
 
   </chapter>
 
+  <chapter id="logging">
+    <title>Logging</title>
+
+<!-- TODO: how to configure logging, logging destinations etc. -->
+
+    <para>
+        Each message written by BIND 10 to the configured logging destinations
+        comprises a number of components that identify the origin of the
+        message and, if the message indicates a problem, information about the
+        problem that may be useful in fixing it.
+    </para>
+
+    <para>
+        Consider the message below logged to a file:
+        <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+    ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+    </para>
+
+    <para>
+      Note: the layout of messages written to the system logging
+      file (syslog) may be slightly different.  This message has
+      been split across two lines here for display reasons; in the
+      logging file, it will appear on one line.
+    </para>
+
+    <para>
+      The log message comprises a number of components:
+
+        <variablelist>
+        <varlistentry>
+        <term>2011-06-15 13:48:22.034</term>
+        <listitem><para>
+            The date and time at which the message was generated.
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>ERROR</term>
+        <listitem><para>
+            The severity of the message.
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>[b10-resolver.asiolink]</term>
+        <listitem><para>
+	    The source of the message.  This comprises two components:
+	    the BIND 10 process generating the message (in this
+	    case, <command>b10-resolver</command>) and the module
+	    within the program from which the message originated
+	    (which in the example is the asynchronous I/O link
+	    module, asiolink).
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>ASIODNS_OPENSOCK</term>
+        <listitem><para>
+	    The message identification.  Every message in BIND 10
+	    has a unique identification, which can be used as an
+	    index into the <ulink
+	    url="bind10-messages.html"><citetitle>BIND 10 Messages
+	    Manual</citetitle></ulink> (<ulink
+	    url="http://bind10.isc.org/docs/bind10-messages.html"
+	    />) from which more information can be obtained.
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+        <listitem><para>
+            A brief description of the cause of the problem.  Within this text,
+            information relating to the condition that caused the message to
+            be logged will be included.  In this example, error number 111
+            (an operating system-specific error number) was encountered when
+            trying to open a TCP connection to port 53 on the local system
+            (address 127.0.0.1).  The next step would be to find out the reason
+            for the failure by consulting your system's documentation to
+            identify what error number 111 means.
+        </para></listitem>
+        </varlistentry>
+        </variablelist>
+
+    </para>
+  </chapter>
+
 <!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
 
   <!-- <index>    <title>Index</title> </index> -->
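
The logging chapter added above fixes the layout of a log line: timestamp,
severity, [process.module], message identification, and free text. The Python
sketch below splits the ASIODNS_OPENSOCK example from that chapter into those
components; the regular expression and field names are assumptions, not
BIND 10 code.

    # Sketch only: splits a BIND 10 log line into the components documented
    # in the chapter above.  The regular expression and field names are
    # assumptions, not BIND 10 code.
    import re

    LOG_LINE = ("2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink] "
                "ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)")

    PATTERN = re.compile(
        r"^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) "
        r"(?P<severity>[A-Z]+) "
        r"\[(?P<process>[^.\]]+)\.(?P<module>[^\]]+)\] "
        r"(?P<msgid>[A-Z0-9_]+) "
        r"(?P<text>.*)$")

    match = PATTERN.match(LOG_LINE)
    if match:
        # severity 'ERROR', process 'b10-resolver', module 'asiolink',
        # msgid 'ASIODNS_OPENSOCK' -- the index into the Messages Manual.
        print(match.groupdict())
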
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
new file mode 100644
index 0000000..b075e96
--- /dev/null
+++ b/doc/guide/bind10-messages.html
@@ -0,0 +1,841 @@
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+        20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+	  Internet Systems Consortium (ISC). It includes DNS libraries
+	  and modular components for controlling authoritative and
+	  recursive DNS servers.
+      </p><p>
+        This is the messages manual for BIND 10 version 20110519.
+	    The most up-to-date version of this document, along with
+	    other documents for BIND 10, can be found at
+        <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+      </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dt><span class="chapter"><a href="#messages">2. BIND 10 Messages</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><p>
+      This document lists each message that can be logged by the
+      programs in the BIND 10 package.  Each entry in this manual
+      is of the form:
+      </p><pre class="screen">IDENTIFICATION message-text</pre><p>
+      ... where "IDENTIFICATION" is the message identification included
+      in each message logged and "message-text" is the accompanying
+      message text.  The "message-text" may include placeholders of the
+      form "%1", "%2" etc.; these parameters are replaced by relevant
+      values when the message is logged.
+    </p><p>
+      Each entry is also accompanied by a description giving more
+      information about the circumstances that result in the message
+      being logged.
+    </p><p>
+      For information on configuring and using BIND 10 logging,
+      refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
+    </p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
+      </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+This message should not appear and indicates an internal error if it does.
+Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+The termination method of the resolver's upstream fetch class was called with
+an unknown result code (which is given in the message).  This message should
+not appear and may indicate an internal error.  Please enter a bug report.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the
+command and control channel. An unexpected exception was thrown. This
+most likely points to an internal inconsistency in the module code. The
+exception message is appended to the log error, and the module will
+continue to run, but will not send back an answer.
+</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was a parse error in the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
+The module specification file for this module was rejected by the
+configuration manager. The full error message answer from the
+configuration manager is appended to the log error. The most likely
+cause is that the module is of a different (specification file) version
+than the running configuration manager.
+</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file. Please
+verify that the filename is correct and that its contents are a valid
+BIND10 module specification.
+</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
+Debug information. The hotspot cache was created at startup.
+</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
+Debug information. The hotspot cache is being destroyed.
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
+The hotspot cache is enabled from now on.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
+Debug information. There was an attempt to look up an item in the hotspot
+cache. The item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
+Debug information. An item was successfully looked up in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
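+</p><p>
+The dropping behaviour can be pictured with a minimal least-recently-used
+container. The sketch below is purely illustrative -- the LruCache class and
+its members are hypothetical and this is not the actual hotspot cache code:
+</p><pre>
+// Minimal LRU sketch: the most recently used items live at the front of a
+// list; when the configured number of slots is exceeded, the item at the
+// back (the least recently used one) is dropped.
+#include <cstddef>
+#include <list>
+#include <string>
+#include <unordered_map>
+
+class LruCache {                         // hypothetical name, illustration only
+public:
+    explicit LruCache(std::size_t slots) : slots_(slots) {}
+
+    void insert(const std::string& key, const std::string& value) {
+        touch(key);                      // move an existing entry to the front
+        if (index_.find(key) == index_.end()) {
+            items_.push_front(Item(key, value));
+            index_[key] = items_.begin();
+        } else {
+            items_.begin()->second = value;      // replace the older instance
+        }
+        if (slots_ != 0 && items_.size() > slots_) {
+            // corresponds to CACHE_FULL followed by CACHE_REMOVE
+            index_.erase(items_.back().first);
+            items_.pop_back();
+        }
+    }
+
+private:
+    typedef std::pair<std::string, std::string> Item;
+
+    void touch(const std::string& key) {
+        auto it = index_.find(key);
+        if (it != index_.end()) {
+            items_.splice(items_.begin(), items_, it->second);
+        }
+    }
+
+    std::size_t slots_;
+    std::list<Item> items_;              // front == most recently used
+    std::unordered_map<std::string, std::list<Item>::iterator> index_;
+};
+
+int main() {
+    LruCache cache(2);
+    cache.insert("a.example.", "A 192.0.2.1");
+    cache.insert("b.example.", "A 192.0.2.2");
+    cache.insert("c.example.", "A 192.0.2.3");   // "a.example." is dropped
+}
+</pre><p>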
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
+Debug information. A new item is being inserted into the hotspot
+cache.
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
+Debug information. An attempt was made to look up an item in the hotspot cache,
+but it is not there.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+Debug information. An item is being removed from the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+The maximum allowed number of items in the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. A size of 0
+means no limit.
+</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
+Debug information. We're processing an internal query for the given name and
+type.
+</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
+Debug information. An RRset is being added to the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
+Debug information. Special marks are needed above each '*' in a wildcard name.
+They are being added now for this name.
+</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
+Debug information. A zone is being added into the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</p></dd><dt><a name="DATASRC_MEM_CNAME"></a><span class="term">DATASRC_MEM_CNAME CNAME at the domain '%1'</span></dt><dd><p>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to a CNAME.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_CREATE"></a><span class="term">DATASRC_MEM_CREATE creating zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</p></dd><dt><a name="DATASRC_MEM_DELEG_FOUND"></a><span class="term">DATASRC_MEM_DELEG_FOUND delegation found at '%1'</span></dt><dd><p>
+Debug information. A delegation point was found above the requested record.
+</p></dd><dt><a name="DATASRC_MEM_DESTROY"></a><span class="term">DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A zone from in-memory data source is being destroyed.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_ENCOUNTERED"></a><span class="term">DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</span></dt><dd><p>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain and
+stop the search.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
+Debug information. A DNAME was found instead of the requested information.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made to put DNAME and NS records into the same domain
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</p></dd><dt><a name="DATASRC_MEM_DUP_RRSET"></a><span class="term">DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</span></dt><dd><p>
+An RRset is being inserted into the in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</p></dd><dt><a name="DATASRC_MEM_EXACT_DELEGATION"></a><span class="term">DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</span></dt><dd><p>
+Debug information. There's an NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+</p></dd><dt><a name="DATASRC_MEM_FIND"></a><span class="term">DATASRC_MEM_FIND find '%1/%2'</span></dt><dd><p>
+Debug information. A search for the requested RRset is being started.
+</p></dd><dt><a name="DATASRC_MEM_FIND_ZONE"></a><span class="term">DATASRC_MEM_FIND_ZONE looking for zone '%1'</span></dt><dd><p>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
+Debug information. The content of the master file is being loaded into memory.
+</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+Debug information. The requested domain does not exist.
+</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+</p></dd><dt><a name="DATASRC_MEM_NXRRSET"></a><span class="term">DATASRC_MEM_NXRRSET no such type '%1' at '%2'</span></dt><dd><p>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</p></dd><dt><a name="DATASRC_MEM_OUT_OF_ZONE"></a><span class="term">DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</span></dt><dd><p>
+An attempt was made to add the domain into a zone that shouldn't have it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_RENAME"></a><span class="term">DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</span></dt><dd><p>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard). So it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</p></dd><dt><a name="DATASRC_MEM_SINGLETON"></a><span class="term">DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</span></dt><dd><p>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_SUCCESS"></a><span class="term">DATASRC_MEM_SUCCESS query for '%1/%2' successful</span></dt><dd><p>
+Debug information. The requested record was found.
+</p></dd><dt><a name="DATASRC_MEM_SUPER_STOP"></a><span class="term">DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</span></dt><dd><p>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested record
+type).
+</p></dd><dt><a name="DATASRC_MEM_SWAP"></a><span class="term">DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</span></dt><dd><p>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for performing a manipulation in an exception-safe
+manner -- the new data are prepared in a different zone object and, once that
+succeeds, they are swapped. The old object then contains the new data and the
+other one can be safely
+destroyed.
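+</p><p>
+Purely as an illustration of the pattern (the ZoneContent type and reload()
+function below are made up, not the real in-memory zone classes): new content
+is built in a temporary object and only swapped into place once that has
+succeeded, so a failure part-way through leaves the original untouched.
+</p><pre>
+// Sketch of the "prepare in a temporary, then swap" idiom described above.
+#include <string>
+#include <vector>
+
+struct ZoneContent {                  // hypothetical stand-in for zone data
+    std::vector<std::string> rrsets;
+    void swap(ZoneContent& other) {   // no-throw swap of the internal state
+        rrsets.swap(other.rrsets);
+    }
+};
+
+void reload(ZoneContent& live, const std::vector<std::string>& newRecords) {
+    ZoneContent staging;
+    staging.rrsets = newRecords;      // may throw; 'live' is still intact
+    live.swap(staging);               // commit: cannot fail
+}                                     // 'staging' now holds the old data and
+                                      // is safely destroyed here
+
+int main() {
+    ZoneContent zone;
+    zone.rrsets.push_back("old RRset");
+    reload(zone, std::vector<std::string>(1, "new RRset"));
+}
+</pre><p>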
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_CANCEL"></a><span class="term">DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</span></dt><dd><p>
+Debug information. A domain above the wildcard was reached, but there's
+something below the requested domain. Therefore the wildcard doesn't apply
+here.  This behaviour is specified by RFC 1034, section 4.3.3.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
+Debug information. A data source is being removed from meta data source.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC"></a><span class="term">DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</span></dt><dd><p>
+Debug information. An NSEC record covering this zone is being added.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC3"></a><span class="term">DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</span></dt><dd><p>
+Debug information. An NSEC3 record for the given zone is being added to the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_RRSET"></a><span class="term">DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</span></dt><dd><p>
+Debug information. An RRset is being added to the response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_SOA"></a><span class="term">DATASRC_QUERY_ADD_SOA adding SOA of '%1'</span></dt><dd><p>
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+</p></dd><dt><a name="DATASRC_QUERY_AUTH_FAIL"></a><span class="term">DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the authoritative query. The code
+1 means a general error and 2 means 'not implemented'. The data source should
+have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+Debug information. While processing a query, lookup to the hotspot cache
+is being made.
+</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
+Debug information. The whole referral information is being copied into the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
+There was a CNAME and it was being followed, but it contains no records,
+so there's nowhere to go. There will be no answer. This indicates a problem
+with supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
+During an attempt to synthesize a CNAME from this DNAME it was discovered the
+DNAME is empty (it has no records). This indicates a problem with supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GET_NS_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</span></dt><dd><p>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GLUE_FAIL"></a><span class="term">DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the glue query. The code 1 means a
+general error and 2 means 'not implemented'. The data source should have
+logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_INVALID_OP"></a><span class="term">DATASRC_QUERY_INVALID_OP invalid query operation requested</span></dt><dd><p>
+This indicates a programmer error. The DO_QUERY was called with an unknown
+operation code.
+</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is an auth query.
+</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for referral information.
+</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a simple query.
+</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_NS"></a><span class="term">DATASRC_QUERY_MISSING_NS missing NS records for '%1'</span></dt><dd><p>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_SOA"></a><span class="term">DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</span></dt><dd><p>
+The answer should have been a negative one (e.g. reporting the nonexistence of
+something). To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NOGLUE_FAIL"></a><span class="term">DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the no-glue query. The code 1
+means a general error and 2 means 'not implemented'. The data source should
+have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC record into the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC3"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC3 record into the message failed, because the zone
+does not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_ZONE"></a><span class="term">DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</span></dt><dd><p>
+Lookup of the domain failed because the data source has no zone that contains the
+domain. Maybe someone sent a query to the wrong server for some reason.
+</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
+Debug information. A query is being processed now.
+</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+The user wants DNSSEC and we discovered the entity doesn't exist (either
+domain or the record), but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+</p></dd><dt><a name="DATASRC_QUERY_REF_FAIL"></a><span class="term">DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the query for referral information.
+The code 1 means a general error and 2 means 'not implemented'. The data
+source should have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_RRSIG"></a><span class="term">DATASRC_QUERY_RRSIG unable to answer RRSIG query</span></dt><dd><p>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</p></dd><dt><a name="DATASRC_QUERY_SIMPLE_FAIL"></a><span class="term">DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the simple query. The code 1 means
+a general error and 2 means 'not implemented'. The data source should have
+logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
+Debug information. While answering a query, a DNAME was met. The DNAME itself
+will be returned, but along with it a CNAME for clients which don't understand
+DNAMEs will be synthesized.
+</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+A CNAME led to another CNAME, which led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with supplied data.
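+</p><p>
+Conceptually the protection is just a bounded loop. The sketch below is
+illustrative only -- the lookupCname() helper and the alias table are made up
+and this is not the data source code:
+</p><pre>
+// Follow CNAMEs, but give up after a fixed number of links to avoid loops.
+#include <iostream>
+#include <map>
+#include <stdexcept>
+#include <string>
+
+// Toy alias table standing in for the data source (purely illustrative).
+std::map<std::string, std::string> aliases = {
+    {"www.example.", "web.example."},
+    {"web.example.", "host1.example."}
+};
+
+// Returns the CNAME target for 'name', or "" if 'name' is not an alias.
+std::string lookupCname(const std::string& name) {
+    std::map<std::string, std::string>::const_iterator it = aliases.find(name);
+    return (it == aliases.end()) ? "" : it->second;
+}
+
+std::string resolveAlias(std::string name) {
+    const int kMaxChain = 16;         // the limit referred to in the message
+    for (int i = 0; i < kMaxChain; ++i) {
+        const std::string target = lookupCname(name);
+        if (target.empty()) {
+            return name;              // not an alias (any more); done
+        }
+        name = target;                // follow one link of the chain
+    }
+    throw std::runtime_error("CNAME chain limit exceeded");
+}
+
+int main() {
+    std::cout << resolveAlias("www.example.") << "\n";   // host1.example.
+}
+</pre><p>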
+</p></dd><dt><a name="DATASRC_QUERY_UNKNOWN_RESULT"></a><span class="term">DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</span></dt><dd><p>
+This indicates a programmer error. The answer from the subtask doesn't look like
+anything known.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD"></a><span class="term">DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</span></dt><dd><p>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
+During an attempt to cover the domain by a wildcard, an error happened. The
+exact kind was hopefully already reported.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, a referral was encountered, but it wasn't possible to get
+enough information for it.  The code is 1 for error, 2 for not implemented.
+</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
+Debug information. The SQLite data source is closing the database file.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being created.
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS"></a><span class="term">DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</span></dt><dd><p>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT"></a><span class="term">DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREC"></a><span class="term">DATASRC_SQLITE_FINDREC looking for record '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up records of the given name
+and type in the database.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF"></a><span class="term">DATASRC_SQLITE_FINDREF looking for referral at '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is identifying whether this domain is
+a referral and where it goes.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was trying to identify whether there's a referral, but
+it contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</span></dt><dd><p>
+Debug information. We're trying to look up an NSEC3 record in the SQLite data
+source.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
+Debug information. We're trying to look up the name preceding the supplied one.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
+The SQLite data source tried to identify the name preceding this one, but this
+one is not contained in any zone in the data source.
+</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
+The database for the SQLite data source was found empty. It is assumed this is
+the first run and it is being initialized with the current schema.  It'll
+still contain no data, but it will be ready for use.
+</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</p></dd><dt><a name="DATASRC_STATIC_FIND"></a><span class="term">DATASRC_STATIC_FIND looking for '%1/%2'</span></dt><dd><p>
+Debug information. This resource record set is being looked up in the static
+data source.
+</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is above the maximum allowed value and has
+been reduced to that value.
+</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
+The string indicating the extended logging level (used by the underlying
+logger implementation code) is not of the stated form.  In particular,
+it starts with DEBUG but does not end with an integer.
+</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is below the minimum allowed value and has
+been increased to that value.
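+</p><p>
+To make the DEBUGn convention concrete, here is an illustrative parser -- not
+the actual logger implementation, and the 0 to 99 bounds are an assumption --
+that accepts strings of the form DEBUGn and clamps out-of-range levels:
+</p><pre>
+// Illustrative handling of "DEBUGn" strings with clamping to [MIN, MAX].
+#include <cstdlib>
+#include <iostream>
+#include <string>
+
+const int MIN_DEBUG = 0;     // assumed bounds, for illustration only
+const int MAX_DEBUG = 99;
+
+// Returns the debug level, clamping out-of-range values as the
+// ABOVEDBGMAX / BELOWDBGMIN messages describe; returns -1 for a
+// malformed string (the BADDEBUG case).
+int parseDebugLevel(const std::string& s) {
+    if (s.compare(0, 5, "DEBUG") != 0) {
+        return -1;                    // does not start with DEBUG
+    }
+    char* end = NULL;
+    const long level = std::strtol(s.c_str() + 5, &end, 10);
+    if (*end != '\0') {
+        return -1;                    // DEBUG not followed by just an integer
+    }
+    if (level > MAX_DEBUG) { return MAX_DEBUG; }
+    if (level < MIN_DEBUG) { return MIN_DEBUG; }
+    return static_cast<int>(level);
+}
+
+int main() {
+    std::cout << parseDebugLevel("DEBUG120") << "\n";  // clamped to 99
+    std::cout << parseDebugLevel("DEBUGXYZ") << "\n";  // -1: malformed
+}
+</pre><p>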
+</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
+A log console output stream was given that was not recognized. The
+output stream should be one of "stdout" or "stderr".
+</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.  In
+this version of the code, such a condition is regarded as an error and the
+read will be abandoned.
+</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+Indicative of a programming error: when it started up, BIND10 detected that
+the given message ID had been registered more than once, possibly by
+different modules.  (All message IDs should be unique throughout BIND10.)
+This has no impact on the operation of the server other than that erroneous
+messages may be logged.  (When BIND10 loads the message IDs (and their
+associated text), if a duplicate ID is found it is discarded.  However, when
+the module that supplied the duplicate ID logs that particular message, the
+text supplied by the module that added the original ID will be output -
+something that may bear no relation to the condition being logged.)
+</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+During start-up a local message file was read.  A line with the listed
+message identification was found in the file, but the identification is not
+one contained in the compiled-in message dictionary.  Either the message
+identification has been mis-spelled in the file, or the local file was used
+for an earlier version of the software and the message with that
+identification has been removed.
+</p><p>
+This message may appear a number of times in the file, once for every such
+unknown message identification.
+</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
+The concatenation of the prefix and the message identification is used as
+a symbol in the C++ module; as such it may only contain alphanumeric
+characters and underscores.  The identification given in the message does not
+meet that restriction.
+</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+indicates the message compiler found a line in the message file comprising
+just the "%" and nothing else.
+</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+is generated when a line is found in the message file that contains the
+leading "%" and the message identification but no text.
+</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with more than one argument.
+</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument should be a valid C++ namespace.  The reader does a
+cursory check on its validity, checking that the characters in the namespace
+are correct.  The error is generated when the reader finds an invalid
+character. (Valid are alphanumeric characters, underscores and colons.)
+</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with no arguments.
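+</p><p>
+Putting the preceding rules together, a minimal message file fragment might
+look like the following (the namespace and the message ID are invented purely
+for illustration):
+</p><pre>
+$NAMESPACE isc::example
+
+% EXAMPLE_STARTED example module has started
+</pre><p>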
+</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for the
+reason given.
+</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
+The program was not able to open the specified output file for the reason
+given.
+</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+The $PREFIX directive takes a single argument, a prefix to be added to the
+symbol names when a C++ .h file is created.  This error is generated when the
+compiler finds a $PREFIX directive with more than one argument.
+</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+The $PREFIX argument is used in a symbol name in a C++ header file.  As such,
+it must adhere to restrictions on C++ symbol names (e.g. may only contain
+alphanumeric characters or underscores, and may not start with a digit).
+A $PREFIX directive was found with an argument (given in the message) that
+violates those restrictions.
+</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND10 when it starts to read a
+local message file.  (A local message file may replace the text of one or more
+messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+The specified error was encountered reading from the named message file.
+</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
+</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing to
+the named output file.
+</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for an RR for the
+specified nameserver but received an invalid response.  Either the success
+function was called without a DNS message or the message was invalid in some
+way. (In the latter case, the error should have been picked up elsewhere in
+the processing logic, hence the raising of the error here.)
+</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for the given RR
+type and class, but instead received an answer with the given type and class.
+</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
+A debug message, this is output when an NSAS (nameserver address store -
+part of the resolver) lookup for a zone has been cancelled.
+</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message, this is output when a call is made to the nameserver address
+store (part of the resolver) to obtain the nameservers for the specified zone.
+</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver) is
+making a callback into the resolver to retrieve the address records for the
+specified nameserver.
+</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has been unable to retrieve the specified resource record for the specified
+nameserver.  This is not necessarily a problem - the nameserver may be
+unreachable, in which case the NSAS will try other nameservers in the zone.
+</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has retrieved the given address for the specified nameserver through an
+external query.
+</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the round-trip time (RTT) for a query made to the specified
+nameserver.  The RTT has been updated using the value given and the new RTT is
+displayed.  (The RTT is subject to a calculation that damps out sudden
+changes.  As a result, the new RTT is not necessarily equal to the RTT
+reported.)
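+</p><p>
+The message text does not give the exact damping formula; purely as an
+illustration of the idea, an exponentially weighted moving average behaves as
+sketched below (the RttEstimate class and the 0.7 weight are made up, and this
+is not the NSAS code):
+</p><pre>
+// Illustrative exponentially weighted moving average: each new sample only
+// moves the stored RTT part of the way, damping out sudden changes.
+#include <iostream>
+
+class RttEstimate {                    // hypothetical class, illustration only
+public:
+    explicit RttEstimate(double initial_ms) : rtt_ms_(initial_ms) {}
+
+    void report(double sample_ms) {
+        const double kOldWeight = 0.7; // arbitrary damping factor
+        rtt_ms_ = kOldWeight * rtt_ms_ + (1.0 - kOldWeight) * sample_ms;
+    }
+
+    double value() const { return rtt_ms_; }
+
+private:
+    double rtt_ms_;
+};
+
+int main() {
+    RttEstimate rtt(50.0);             // stored RTT starts at 50 ms
+    rtt.report(500.0);                 // one slow reply...
+    std::cout << rtt.value() << " ms\n";  // ...moves the estimate to 185 ms,
+}                                         // not all the way to 500 ms
+</pre><p>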
+</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
+A debug message recording that an answer has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_CNAME"></a><span class="term">RESLIB_CNAME CNAME received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent).  However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+A debug message, this indicates that a response was received for the specified
+query and was categorised as a referral.  However, the received message did
+not contain any NS RRsets.  This may indicate a programming error in the
+response classification code.
+</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+A debug message recording that either an NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question.  Previous debug
+messages will have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1:  %3</span></dt><dd><p>
+A debug message indicating that a protocol error was received.  As there
+are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path.  A SERVFAIL will be returned.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
+</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that a RecursiveQuery object found the
+specified <name, class, type> tuple in the cache.  The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that the cache lookup made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question.  The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple.  The first action will be to look up
+the specified tuple in the cache.  The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer.  The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
+A debug message giving the round-trip time of the last query and response.
+</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
+This is an internal debugging message and is only generated in unit tests.
+It indicates that all upstream queries from the resolver are being routed to
+the specified server, regardless of the address of the nameserver to which
+the query would normally be routed.  As it should never be seen in normal
+operation, it is a warning message instead of a debug message.
+</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+This is a debug message and should only be seen in unit tests.  A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and as
+there are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_TRUNCATED"></a><span class="term">RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</span></dt><dd><p>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP.  There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
+A debug message, the resolver received an AXFR request over TCP.  The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
+A debug message, the resolver received an AXFR request over UDP.  The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error).  The reason for the error, given as a parameter in the message,
+will give more details.
+</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
+A debug message, output when the resolver configuration has been successfully
+loaded.
+</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
+A debug message, the configuration has been updated with the specified
+information.
+</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
+A debug message, output when the Resolver() object has been created.
+</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+A debug message, this contains details of the response sent back to the querying
+system.
+</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
+This is an error message output when an unhandled exception is caught by the
+resolver.  All it can do is to shut down.
+</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
+The received query has passed all checks and is being forwarded to upstream
+servers.
+</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
+A debug message noting that an exception occurred during the processing of
+a received packet.  The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
+The resolver received an IXFR request.  The server cannot process it
+and will return an error message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver received a NOTIFY message.  As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
+The received query has passed all checks and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
+A warning message output during startup, indicating that no root addresses
+have been set.  This may be because the resolver will get them from a priming
+query.
+</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message.  This is a malformed
+message, as a DNS query must contain only one question.  The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes).  It will return a message to the sender
+with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded).  The message parameters give
+a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
+This message is logged when a "print_message" command is received over the
+command channel.
+</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded).  The message parameters give a textual
+description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
+A debug message noting that the resolver is creating a RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
+A debug message indicating that the resolver has received a message.  Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+An error message indicating that the resolver configuration has specified a
+negative retry count.  Only zero or positive values are valid.
+</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
+A debug message, output when the main service object (which handles the
+received queries) is created.
+</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+A debug message, lists the parameters associated with the message.  These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers.  Client timeout: the interval allowed for resolving a
+client's query: after this time, the resolver sends back a SERVFAIL to the
+client whilst continuing to resolve the query. Lookup timeout: the time at which the
+resolver gives up trying to resolve a query.  Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+</p><p>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers.  Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout.  When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process. Data received is added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+time out, then drop the query.
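+</p><p>
+As an illustration only (the values below are invented for this example and
+are not defaults taken from the source code), the four settings might relate
+to each other as follows:
+</p><pre>
+   query timeout:   2000 ms   (an individual upstream query times out after 2s)
+   retry count:     3         (up to 3 retries to an upstream server on timeout)
+   client timeout:  4000 ms   (the client receives SERVFAIL after 4s, but
+                               resolution continues in the background)
+   lookup timeout: 30000 ms   (after 30s the resolver gives up on the query)
+</pre><p>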
+</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
+This information message is output when the resolver has shut down.
+</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the resolver starts up.
+</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+</p></dd></dl></div><p>
+    </p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
new file mode 100644
index 0000000..eaa8bb9
--- /dev/null
+++ b/doc/guide/bind10-messages.xml
@@ -0,0 +1,2018 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash  "&#x2014;" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<book>
+  <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+  <bookinfo>
+    <title>BIND 10 Messages Manual</title>
+
+    <copyright>
+      <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+    </copyright>
+
+    <abstract>
+      <para>BIND 10 is a Domain Name System (DNS) suite managed by
+	  Internet Systems Consortium (ISC). It includes DNS libraries
+	  and modular components for controlling authoritative and
+	  recursive DNS servers.
+      </para>
+      <para>
+        This is the messages manual for BIND 10 version &__VERSION__;.
+	    The most up-to-date version of this document, along with
+	    other documents for BIND 10, can be found at
+        <ulink url="http://bind10.isc.org/docs"/>.
+      </para>
+    </abstract>
+
+    <releaseinfo>This is the messages manual for BIND 10 version
+        &__VERSION__;.</releaseinfo>
+  </bookinfo>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+      This document lists each message that can be logged by the
+      programs in the BIND 10 package.  Each entry in this manual
+      is of the form:
+      <screen>IDENTIFICATION message-text</screen>
+      ... where "IDENTIFICATION" is the message identification included
+      in each message logged and "message-text" is the accompanying
+      message text.  The "message-text" may include placeholders of the
+      form "%1", "%2" etc.; these parameters are replaced by relevant
+      values when the message is logged.
+    </para>
+    <para>
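+    <para>
+      As an illustration only (the substituted value of "2" below is invented
+      for this example), a message entry such as
+      <screen>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</screen>
+      might appear in the log output, ignoring any timestamp and severity
+      information added by the logging system, as:
+      <screen>RESOLVER_NOTONEQUES query contained 2 questions, exactly one question was expected</screen>
+    </para>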
+      Each entry is also accompanied by a description giving more
+      information about the circumstances that result in the message
+      being logged.
+    </para>
+    <para>
+      For information on configuring and using BIND 10 logging,
+      refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+    </para>
+  </chapter>
+
+  <chapter id="messages">
+    <title>BIND 10 Messages</title>
+    <para>
+      <variablelist>
+
+<varlistentry id="ASIODNS_FETCHCOMP">
+<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<listitem><para>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_FETCHSTOP">
+<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<listitem><para>
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_OPENSOCK">
+<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_RECVSOCK">
+<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_RECVTMO">
+<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<listitem><para>
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_SENDSOCK">
+<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKORIGIN">
+<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+This message should not appear and indicates an internal error if it does.
+Please enter a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKRESULT">
+<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+The termination method of the resolver's upstream fetch class was called with
+an unknown result code (which is given in the message).  This message should
+not appear and may indicate an internal error.  Please enter a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG">
+<term>CONFIG_CCSESSION_MSG error in CC session message: %1</term>
+<listitem><para>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
+<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
+<listitem><para>
+There was an internal problem handling an incoming message on the
+command and control channel. An unexpected exception was thrown. This
+most likely points to an internal inconsistency in the module code. The
+exception message is appended to the log error, and the module will
+continue to run, but will not send back an answer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_FOPEN_ERR">
+<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_JSON_PARSE">
+<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
+<listitem><para>
+There was a parse error in the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MANAGER_CONFIG">
+<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
+<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<listitem><para>
+The module specification file for this module was rejected by the
+configuration manager. The full error message answer from the
+configuration manager is appended to the log error. The most likely
+cause is that the module is of a different (specification file) version
+than the running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MODULE_SPEC">
+<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file. Please
+verify that the filename is correct and that its contents are a valid
+BIND10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_CREATE">
+<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
+<listitem><para>
+Debug information that the hotspot cache was created at startup.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DESTROY">
+<term>DATASRC_CACHE_DESTROY destroying the hotspot cache</term>
+<listitem><para>
+Debug information. The hotspot cache is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DISABLE">
+<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<listitem><para>
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_ENABLE">
+<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<listitem><para>
+The hotspot cache is enabled from now on.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_EXPIRED">
+<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<listitem><para>
+Debug information. There was an attempt to look up an item in the hotspot
+cache. The item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FOUND">
+<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
+<listitem><para>
+Debug information. An item was successfully looked up in the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FULL">
+<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<listitem><para>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_INSERT">
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<listitem><para>
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_NOT_FOUND">
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<listitem><para>
+Debug information. An attempt was made to look up an item in the hotspot cache,
+but it is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_OLD_FOUND">
+<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<listitem><para>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_REMOVE">
+<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<listitem><para>
+Debug information. An item is being removed from the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_SLOTS">
+<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<listitem><para>
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. A size of 0
+means no limit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DO_QUERY">
+<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
+<listitem><para>
+Debug information. We're processing some internal query for the given name and
+type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_RRSET">
+<term>DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</term>
+<listitem><para>
+Debug information. An RRset is being added to the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
+<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
+<listitem><para>
+Debug information. Special marks are needed above each '*' in a wildcard name;
+they are being added now for this name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_ZONE">
+<term>DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</term>
+<listitem><para>
+Debug information. A zone is being added into the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ANY_SUCCESS">
+<term>DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</term>
+<listitem><para>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME">
+<term>DATASRC_MEM_CNAME CNAME at the domain '%1'</term>
+<listitem><para>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_COEXIST">
+<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
+<listitem><para>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to a CNAME.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_TO_NONEMPTY">
+<term>DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</term>
+<listitem><para>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
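+
+<!-- Illustrative example only (hypothetical zone data, not taken from the
+     BIND 10 sources): a combination such as
+
+         www.example.org.  IN  CNAME  host.example.org.
+         www.example.org.  IN  TXT    "some other data"
+
+     is the kind of input rejected by DATASRC_MEM_CNAME_TO_NONEMPTY and
+     DATASRC_MEM_CNAME_COEXIST, because a CNAME may not coexist with any
+     other data at the same name (RFC 1034, section 3.6.2).
+-->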
+
+<varlistentry id="DATASRC_MEM_CREATE">
+<term>DATASRC_MEM_CREATE creating zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DELEG_FOUND">
+<term>DATASRC_MEM_DELEG_FOUND delegation found at '%1'</term>
+<listitem><para>
+Debug information. A delegation point was found above the requested record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DESTROY">
+<term>DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A zone from in-memory data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_ENCOUNTERED">
+<term>DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</term>
+<listitem><para>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain and
+stop the search.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_FOUND">
+<term>DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</term>
+<listitem><para>
+Debug information. A DNAME was found instead of the requested information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_NS">
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
+<listitem><para>
+A request was made to put DNAME and NS records into the same domain that is
+not the apex (the top of the zone). This is forbidden by RFC 2672, section 3.
+This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DOMAIN_EMPTY">
+<term>DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</term>
+<listitem><para>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DUP_RRSET">
+<term>DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</term>
+<listitem><para>
+An RRset is being inserted into the in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_EXACT_DELEGATION">
+<term>DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</term>
+<listitem><para>
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND">
+<term>DATASRC_MEM_FIND find '%1/%2'</term>
+<listitem><para>
+Debug information. A search for the requested RRset is being started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND_ZONE">
+<term>DATASRC_MEM_FIND_ZONE looking for zone '%1'</term>
+<listitem><para>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_LOAD">
+<term>DATASRC_MEM_LOAD loading zone '%1' from file '%2'</term>
+<listitem><para>
+Debug information. The content of master file is being loaded into the memory.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NOTFOUND">
+<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<listitem><para>
+Debug information. The requested domain does not exist.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NS_ENCOUNTERED">
+<term>DATASRC_MEM_NS_ENCOUNTERED encountered a NS</term>
+<listitem><para>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NXRRSET">
+<term>DATASRC_MEM_NXRRSET no such type '%1' at '%2'</term>
+<listitem><para>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_OUT_OF_ZONE">
+<term>DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</term>
+<listitem><para>
+An attempt was made to add the domain into a zone that should not contain it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_RENAME">
+<term>DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</term>
+<listitem><para>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SINGLETON">
+<term>DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</term>
+<listitem><para>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUCCESS">
+<term>DATASRC_MEM_SUCCESS query for '%1/%2' successful</term>
+<listitem><para>
+Debug information. The requested record was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUPER_STOP">
+<term>DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</term>
+<listitem><para>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested record
+type).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SWAP">
+<term>DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</term>
+<listitem><para>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for doing some manipulation in an exception-safe
+manner: the new data are prepared in a different zone object and, when that
+succeeds, the contents are swapped. The old object then contains the new data
+and the other one can be safely destroyed.
+</para></listitem>
+</varlistentry>
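+
+<!-- Illustrative sketch only: a minimal, generic C++ rendering of the
+     prepare-and-swap idiom described above.  It does not use the real
+     BIND 10 zone classes; the type and function names here are invented.
+
+#include <string>
+#include <utility>
+#include <vector>
+
+struct ZoneData {
+    std::vector<std::string> rrsets;        // stand-in for real zone contents
+};
+
+void reloadZone(ZoneData& live, const std::vector<std::string>& new_rrsets) {
+    ZoneData staging;                       // prepare the new data on the side
+    staging.rrsets = new_rrsets;            // may throw; 'live' is untouched
+    std::swap(live.rrsets, staging.rrsets); // non-throwing exchange of contents
+    // 'staging' now holds the old data and is destroyed safely on return.
+}
+-->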
+
+<varlistentry id="DATASRC_MEM_WILDCARD_CANCEL">
+<term>DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</term>
+<listitem><para>
+Debug information. A domain above a wildcard was reached, but there's something
+below the requested domain. Therefore the wildcard doesn't apply here.  This
+behaviour is specified by RFC 1034, section 4.3.3.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_NS">
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD">
+<term>DATASRC_META_ADD adding a data source into meta data source</term>
+<listitem><para>
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
+<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
+<listitem><para>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_REMOVE">
+<term>DATASRC_META_REMOVE removing data source from meta data source</term>
+<listitem><para>
+Debug information. A data source is being removed from meta data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC">
+<term>DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</term>
+<listitem><para>
+Debug information. A NSEC record covering this zone is being added.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC3">
+<term>DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</term>
+<listitem><para>
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_RRSET">
+<term>DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</term>
+<listitem><para>
+Debug information. An RRset is being added to the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_SOA">
+<term>DATASRC_QUERY_ADD_SOA adding SOA of '%1'</term>
+<listitem><para>
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_AUTH_FAIL">
+<term>DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_BAD_REFERRAL">
+<term>DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</term>
+<listitem><para>
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CACHED">
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<listitem><para>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
+<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<listitem><para>
+Debug information. While processing a query, a lookup in the hotspot cache
+is being made.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_COPY_AUTH">
+<term>DATASRC_QUERY_COPY_AUTH copying authoritative section into message</term>
+<listitem><para>
+Debug information. The whole referral information is being copied into the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_DELEGATION">
+<term>DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</term>
+<listitem><para>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
+<listitem><para>
+There was a CNAME and it was being followed, but it contains no records,
+so there's nowhere to go. There will be no answer. This indicates a problem
+with the supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_DNAME">
+<term>DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</term>
+<listitem><para>
+During an attempt to synthesize a CNAME from this DNAME it was discovered that
+the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FAIL">
+<term>DATASRC_QUERY_FAIL query failed</term>
+<listitem><para>
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
+<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
+<listitem><para>
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_MX_ADDITIONAL">
+<term>DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_NS_ADDITIONAL">
+<term>DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
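+
+<!-- Illustrative example only (hypothetical names): if the answer contains
+
+         example.org.  IN  MX  10 mail.example.org.
+
+     then the A/AAAA records for mail.example.org, if available, are looked up
+     and placed in the additional section of the response.  The same additional
+     section processing is performed for the target names of NS records.
+-->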
+
+<varlistentry id="DATASRC_QUERY_GLUE_FAIL">
+<term>DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_INVALID_OP">
+<term>DATASRC_QUERY_INVALID_OP invalid query operation requested</term>
+<listitem><para>
+This indicates a programmer error. The DO_QUERY was called with an unknown
+operation code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_AUTH">
+<term>DATASRC_QUERY_IS_AUTH auth query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is an auth query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_GLUE">
+<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
+<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_REF">
+<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for referral information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_SIMPLE">
+<term>DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a simple query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISPLACED_TASK">
+<term>DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</term>
+<listitem><para>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_NS">
+<term>DATASRC_QUERY_MISSING_NS missing NS records for '%1'</term>
+<listitem><para>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_SOA">
+<term>DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</term>
+<listitem><para>
+The answer should have been a negative one (e.g. reporting the nonexistence of
+something).  To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NOGLUE_FAIL">
+<term>DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC">
+<term>DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC3">
+<term>DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC3 record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_ZONE">
+<term>DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</term>
+<listitem><para>
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROCESS">
+<term>DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</term>
+<listitem><para>
+Debug information. A sure query is being processed now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
+<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<listitem><para>
+The user wants DNSSEC and we discovered the entity doesn't exist (either the
+domain or the record), but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_REF_FAIL">
+<term>DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_RRSIG">
+<term>DATASRC_QUERY_RRSIG unable to answer RRSIG query</term>
+<listitem><para>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SIMPLE_FAIL">
+<term>DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
+<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
+<listitem><para>
+Debug information. While answering a query, a DNAME was encountered. The DNAME
+itself will be returned, along with a synthesized CNAME for clients which don't
+understand DNAMEs.
+</para></listitem>
+</varlistentry>
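+
+<!-- Illustrative example only (hypothetical names): given a DNAME such as
+
+         example.org.  IN  DNAME  example.net.
+
+     a query for www.example.org would be answered with the DNAME itself plus
+     a synthesized CNAME along the lines of
+
+         www.example.org.  IN  CNAME  www.example.net.
+
+     so that clients which do not understand DNAME can still follow the
+     redirection.
+-->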
+
+<varlistentry id="DATASRC_QUERY_TASK_FAIL">
+<term>DATASRC_QUERY_TASK_FAIL task failed with %1</term>
+<listitem><para>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
+<listitem><para>
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_UNKNOWN_RESULT">
+<term>DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</term>
+<listitem><para>
+This indicates a programmer error. The answer of the subtask doesn't look like
+anything known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD">
+<term>DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</term>
+<listitem><para>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_FAIL">
+<term>DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</term>
+<listitem><para>
+During an attempt to cover the domain by a wildcard, an error occurred. The
+exact kind should have been reported already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_REFERRAL">
+<term>DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, a referral was encountered, but it wasn't possible
+to get enough information for it.  The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CLOSE">
+<term>DATASRC_SQLITE_CLOSE closing SQLite database</term>
+<listitem><para>
+Debug information. The SQLite data source is closing the database file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CREATE">
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_DESTROY">
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
+<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<listitem><para>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND">
+<term>DATASRC_SQLITE_FIND looking for RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS">
+<term>DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</term>
+<listitem><para>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT">
+<term>DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREC">
+<term>DATASRC_SQLITE_FINDREC looking for record '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up records of the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF">
+<term>DATASRC_SQLITE_FINDREF looking for referral at '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was trying to identify if there's a referral, but it
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_BAD_CLASS">
+<term>DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3">
+<term>DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</term>
+<listitem><para>
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE">
+<term>DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</term>
+<listitem><para>
+The SQLite data source was asked to provide an NSEC3 record for the given zone,
+but it doesn't contain that zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_OPEN">
+<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS">
+<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
+<listitem><para>
+Debug information. We're trying to look up the name preceding the supplied one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
+<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
+<listitem><para>
+The SQLite data source tried to identify the name preceding this one, but this
+name is not contained in any zone in the data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_SETUP">
+<term>DATASRC_SQLITE_SETUP setting up SQLite database</term>
+<listitem><para>
+The database for the SQLite data source was found empty. It is assumed that
+this is the first run and it is being initialized with the current schema.  It
+will still contain no data, but it will be ready for use.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_BAD_CLASS">
+<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<listitem><para>
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_CREATE">
+<term>DATASRC_STATIC_CREATE creating the static datasource</term>
+<listitem><para>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_FIND">
+<term>DATASRC_STATIC_FIND looking for '%1/%2'</term>
+<listitem><para>
+Debug information. This resource record set is being looked up in the static
+data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_UNEXPECTED_QUERY_STATE">
+<term>DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</term>
+<listitem><para>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_ABOVEDBGMAX">
+<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<listitem><para>
+A message from the underlying logger implementation code, the debug level
+(as set by the string DEBUGn) is above the maximum allowed value and has
+been reduced to that value.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BADDEBUG">
+<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<listitem><para>
+The string indicating the extended logging level (used by the underlying
+logger implementation code) is not of the stated form.  In particular,
+it starts DEBUG but does not end with an integer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BELOWDBGMIN">
+<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<listitem><para>
+A message from the underlying logger implementation code, the debug level
+(as set by the string DEBUGn) is below the minimum allowed value and has
+been increased to that value.
+</para></listitem>
+</varlistentry>
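+
+<!-- Illustrative note: per the three entries above, the extended debug level
+     string must have the form DEBUGn, for example DEBUG5 or DEBUG50.  A value
+     such as DEBUGfoo triggers LOGIMPL_BADDEBUG, while an integer outside the
+     allowed range is clamped as described by LOGIMPL_ABOVEDBGMAX or
+     LOGIMPL_BELOWDBGMIN (the exact minimum and maximum are not stated here).
+-->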
+
+<varlistentry id="MSG_BADDESTINATION">
+<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<listitem><para>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADSEVERITY">
+<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<listitem><para>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADSTREAM">
+<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<listitem><para>
+A log console output stream was given that was not recognized. The
+output stream should be one of "stdout" or "stderr".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_DUPLNS">
+<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<listitem><para>
+When reading a message file, more than one $NAMESPACE directive was found.  In
+this version of the code, such a condition is regarded as an error and the
+read will be abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_DUPMSGID">
+<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<listitem><para>
+Indicative of a programming error, when it started up, BIND10 detected that
+the given message ID had been registered more than once.  (All message
+IDs should be unique throughout BIND10.)  This has no impact on the operation
+of the server other than that erroneous messages may be logged.  (When BIND10
+loads the message IDs (and their associated text), if a duplicate ID is found
+it is discarded.  However, when the module that supplied the duplicate ID logs
+that particular message, the text supplied by the module that added the
+original ID will be output - something that may bear no relation to the
+condition being logged.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_IDNOTFND">
+<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<listitem><para>
+During start-up a local message file was read.  A line with the listed
+message identification was found in the file, but the identification is not
+one contained in the compiled-in message dictionary.  Either the message
+identification has been mis-spelled in the file, or the local file was used
+for an earlier version of the software and the message with that
+identification has been removed.
+</para><para>
+This message may appear a number of times in the file, once for every such
+unknown message identification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_INVMSGID">
+<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<listitem><para>
+The concatenation of the prefix and the message identification is used as
+a symbol in the C++ module; as such it may only contain alphanumeric
+characters and underscores.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NOMSGID">
+<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+indicates the message compiler found a line in the message file comprising
+just the "%" and nothing else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NOMSGTXT">
+<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+is generated when a line is found in the message file that contains the
+leading "%" and the message identification but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSEXTRARG">
+<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with more than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSINVARG">
+<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument should be a valid C++ namespace.  The reader does a
+cursory check on its validity, checking that the characters in the namespace
+are correct.  The error is generated when the reader finds an invalid
+character. (Valid are alphanumeric characters, underscores and colons.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSNOARG">
+<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_OPENIN">
+<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for the
+reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_OPENOUT">
+<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<listitem><para>
+The program was not able to open the specified output file for the reason
+given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_PRFEXTRARG">
+<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<listitem><para>
+The $PREFIX directive takes a single argument, a prefix to be added to the
+symbol names when a C++ .h file is created.  This error is generated when the
+compiler finds a $PREFIX directive with more than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_PRFINVARG">
+<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $PREFIX argument is used in a symbol name in a C++ header file.  As such,
+it must adhere to restrictions on C++ symbol names (e.g. may only contain
+alphanumeric characters or underscores, and may not start with a digit).
+A $PREFIX directive was found with an argument (given in the message) that
+violates those restrictions.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_RDLOCMES">
+<term>MSG_RDLOCMES reading local message file %1</term>
+<listitem><para>
+This is an informational message output by BIND10 when it starts to read a
+local message file.  (A local message file may replace the text of one or more
+messages; the ID of the message will not be changed though.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_READERR">
+<term>MSG_READERR error reading from message file %1: %2</term>
+<listitem><para>
+The specified error was encountered reading from the named message file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_UNRECDIR">
+<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<listitem><para>
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_WRITERR">
+<term>MSG_WRITERR error writing to %1: %2</term>
+<listitem><para>
+The specified error was encountered by the message compiler when writing to
+the named output file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVRESPSTR">
+<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<listitem><para>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for an RR for the
+specified nameserver but received an invalid response.  Either the success
+function was called without a DNS message or the message was invalid in some
+way. (In the latter case, the error should have been picked up elsewhere in
+the processing logic, hence the raising of the error here.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVRESPTC">
+<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for the given RR
+type and class, but instead received an answer with the given type and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUPCANCEL">
+<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<listitem><para>
+A debug message, this is output when a NSAS (nameserver address store -
+part of the resolver) lookup for a zone has been cancelled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUPZONE">
+<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<listitem><para>
+A debug message, this is output when a call is made to the nameserver address
+store (part of the resolver) to obtain the nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSADDR">
+<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver) is
+making a callback into the resolver to retrieve the address records for the
+specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSLKUPFAIL">
+<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has been unable to retrieve the specified resource record for the specified
+nameserver.  This is not necessarily a problem - the nameserver may be
+unreachable, in which case the NSAS will try other nameservers in the zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSLKUPSUCC">
+<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has retrieved the given address for the specified nameserver through an
+external query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_SETRTT">
+<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<listitem><para>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the round-trip time (RTT) for a query made to the specified
+nameserver.  The RTT has been updated using the value given and the new RTT is
+displayed.  (The RTT is subject to a calculation that damps out sudden
+changes.  As a result, the new RTT is not necessarily equal to the RTT
+reported.)
+</para></listitem>
+</varlistentry>
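
As an illustration only (the weighting below is an assumption made for the
example and is not taken from the NSAS code), a damped RTT update of the kind
described might look like this in C++:

    // Illustrative only: damp sudden changes by weighting the existing
    // estimate more heavily than the new measurement.  The 0.7/0.3 split is
    // an assumption for this sketch, not the value used by the NSAS.
    int updateRtt(int current_rtt_ms, int measured_rtt_ms) {
        return (static_cast<int>(0.7 * current_rtt_ms + 0.3 * measured_rtt_ms));
    }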
+
+<varlistentry id="RESLIB_ANSWER">
+<term>RESLIB_ANSWER answer received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that an answer has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_CNAME">
+<term>RESLIB_CNAME CNAME received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_DEEPEST">
+<term>RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</term>
+<listitem><para>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_FOLLOWCNAME">
+<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<listitem><para>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_LONGCHAIN">
+<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent).  However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NONSRRSET">
+<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<listitem><para>
+A debug message, this indicates that a response was received for the specified
+query and was categorised as a referral.  However, the received message did
+not contain any NS RRsets.  This may indicate a programming error in the
+response classification code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NSASLOOK">
+<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<listitem><para>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NXDOMRR">
+<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that either an NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question.  Previous debug
+messages will have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOL">
+<term>RESLIB_PROTOCOL protocol error in answer for %1: %2</term>
+<listitem><para>
+A debug message indicating that a protocol error was received.  As there
+are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOLRTRY">
+<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RCODERR">
+<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<listitem><para>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path.  A SERVFAIL will be returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERZONE">
+<term>RESLIB_REFERZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESCAFND">
+<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache.  The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESCANOTFND">
+<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question.  The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESOLVE">
+<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple.  The first action will be to look up
+the specified tuple in the cache.  The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RRSETFND">
+<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer.  The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RTT">
+<term>RESLIB_RTT round-trip time of last query calculated as %1 ms</term>
+<listitem><para>
+A debug message giving the round-trip time of the last query and response.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNCAFND">
+<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNCALOOK">
+<term>RESLIB_RUNCALOOK looking up <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQUFAIL">
+<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQUSUCC">
+<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TESTSERV">
+<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<listitem><para>
+This is an internal message and is only generated in unit tests.
+It indicates that all upstream queries from the resolver are being routed to
+the specified server, regardless of the address of the nameserver to which
+the query would normally be routed.  As it should never be seen in normal
+operation, it is a warning message instead of a debug message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TESTUPSTR">
+<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<listitem><para>
+This is a debug message and should only be seen in unit tests.  A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUT">
+<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and as
+there are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUTRTRY">
+<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TRUNCATED">
+<term>RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</term>
+<listitem><para>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP.  There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_UPSTREAM">
+<term>RESLIB_UPSTREAM sending upstream query for <%1> to %2</term>
+<listitem><para>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFRTCP">
+<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<listitem><para>
+A debug message, the resolver received an AXFR request over TCP.  The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFRUDP">
+<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<listitem><para>
+A debug message, the resolver received an AXFR request over UDP.  The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CLTMOSMALL">
+<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the client
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGCHAN">
+<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<listitem><para>
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGERR">
+<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<listitem><para>
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error).  The reason for the error, given as a parameter in the message,
+will give more details.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGLOAD">
+<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<listitem><para>
+A debug message, output when the resolver configuration has been successfully
+loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGUPD">
+<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<listitem><para>
+A debug message, the configuration has been updated with the specified
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CREATED">
+<term>RESOLVER_CREATED main resolver object created</term>
+<listitem><para>
+A debug message, output when the Resolver() object has been created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNSMSGRCVD">
+<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<listitem><para>
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNSMSGSENT">
+<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<listitem><para>
+A debug message, this contains details of the response sent back to the querying
+system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FAILED">
+<term>RESOLVER_FAILED resolver failed, reason: %1</term>
+<listitem><para>
+This is an error message output when an unhandled exception is caught by the
+resolver.  All it can do is to shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FWDADDR">
+<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FWDQUERY">
+<term>RESOLVER_FWDQUERY processing forward query</term>
+<listitem><para>
+The received query has passed all checks and is being forwarded to upstream
+servers.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_HDRERR">
+<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<listitem><para>
+A debug message noting that an exception occurred during the processing of
+a received packet.  The packet has been dropped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_IXFR">
+<term>RESOLVER_IXFR IXFR request received</term>
+<listitem><para>
+The resolver received an IXFR request.  The server cannot process it
+and will return an error message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_LKTMOSMALL">
+<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NFYNOTAUTH">
+<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<listitem><para>
+The resolver received a NOTIFY message.  As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NORMQUERY">
+<term>RESOLVER_NORMQUERY processing normal query</term>
+<listitem><para>
+The received query has passed all checks and is being processed by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOROOTADDR">
+<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<listitem><para>
+A warning message output during startup, indicating that no root addresses have been
+set.  This may be because the resolver will get them from a priming query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTIN">
+<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<listitem><para>
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTONEQUES">
+<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<listitem><para>
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message.  This is a malformed
+message, as a DNS query must contain only one question.  The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_OPCODEUNS">
+<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<listitem><para>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes).  It will return a message to the sender
+with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PARSEERR">
+<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<listitem><para>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded).  The message parameters give
+a textual description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PRINTMSG">
+<term>RESOLVER_PRINTMSG print message command, arguments are: %1</term>
+<listitem><para>
+This message is logged when a "print_message" command is received over the
+command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PROTERR">
+<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<listitem><para>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded).  The message parameters give a textual
+description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUSETUP">
+<term>RESOLVER_QUSETUP query setup</term>
+<listitem><para>
+A debug message noting that the resolver is creating a RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUSHUT">
+<term>RESOLVER_QUSHUT query shutdown</term>
+<listitem><para>
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUTMOSMALL">
+<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<listitem><para>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECVMSG">
+<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<listitem><para>
+A debug message indicating that the resolver has received a message.  Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RETRYNEG">
+<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<listitem><para>
+An error message indicating that the resolver configuration has specified a
+negative retry count.  Only zero or positive values are valid.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_ROOTADDR">
+<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SERVICE">
+<term>RESOLVER_SERVICE service object created</term>
+<listitem><para>
+A debug message, output when the main service object (which handles the
+received queries) is created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SETPARAM">
+<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<listitem><para>
+A debug message that lists the parameters associated with the message.  These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers.  Client timeout: the interval allowed for the resolution of
+a client's query: after this time, the resolver sends back a SERVFAIL to the
+client whilst continuing to resolve the query. Lookup timeout: the time at which the
+resolver gives up trying to resolve a query.  Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+</para><para>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers.  Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout.  When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process. Data received is added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+timeout and drop the query.
+</para></listitem>
+</varlistentry>
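
To make the client/lookup timeout behaviour described above concrete, here is a
minimal C++ sketch; all names in it are hypothetical and are not the resolver's
actual classes or methods:

    #include <iostream>

    // Hypothetical state for one client query being resolved.
    struct QueryState {
        QueryState() : answered(false), servfail_sent(false) {}
        bool answered;        // has a final answer been produced?
        bool servfail_sent;   // has a SERVFAIL already been sent to the client?
    };

    // Client timeout: tell the client the query failed, but carry on
    // resolving so that any data received can still be added to the cache.
    void onClientTimeout(QueryState& query) {
        if (!query.answered && !query.servfail_sent) {
            std::cout << "returning SERVFAIL to client; resolution continues\n";
            query.servfail_sent = true;
        }
    }

    // Lookup timeout: even the resolver gives up.  It waits for pending
    // upstream queries to complete or time out, then drops the query.
    void onLookupTimeout(QueryState& query) {
        if (!query.answered) {
            std::cout << "abandoning resolution of the query\n";
        }
    }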
+
+<varlistentry id="RESOLVER_SHUTDOWN">
+<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
+<listitem><para>
+This information message is output when the resolver has shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTED">
+<term>RESOLVER_STARTED resolver started</term>
+<listitem><para>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTING">
+<term>RESOLVER_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the resolver starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNEXRESP">
+<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<listitem><para>
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+</para></listitem>
+</varlistentry>
+      </variablelist>
+    </para>
+  </chapter>
+</book>
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 9c52504..64136c1 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -16,7 +16,8 @@ endif
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
 
-CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES  = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES += auth_messages.h auth_messages.cc
 
 man_MANS = b10-auth.8
 EXTRA_DIST = $(man_MANS) b10-auth.xml
@@ -34,16 +35,25 @@ auth.spec: auth.spec.pre
 spec_config.h: spec_config.h.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
 
-BUILT_SOURCES = spec_config.h
+auth_messages.h auth_messages.cc: auth_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/auth/auth_messages.mes
+
+BUILT_SOURCES = spec_config.h auth_messages.h auth_messages.cc
+
 pkglibexec_PROGRAMS = b10-auth
 b10_auth_SOURCES = query.cc query.h
 b10_auth_SOURCES += auth_srv.cc auth_srv.h
+b10_auth_SOURCES += auth_log.cc auth_log.h
 b10_auth_SOURCES += change_user.cc change_user.h
 b10_auth_SOURCES += auth_config.cc auth_config.h
 b10_auth_SOURCES += command.cc command.h
 b10_auth_SOURCES += common.h common.cc
 b10_auth_SOURCES += statistics.cc statistics.h
 b10_auth_SOURCES += main.cc
+
+nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
+EXTRA_DIST += auth_messages.mes
+
 b10_auth_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
 b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
diff --git a/src/bin/auth/auth_log.cc b/src/bin/auth/auth_log.cc
new file mode 100644
index 0000000..d41eaea
--- /dev/null
+++ b/src/bin/auth/auth_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the top-level component of b10-auth.
+
+#include "auth_log.h"
+
+namespace isc {
+namespace auth {
+
+isc::log::Logger auth_logger("auth");
+
+} // namespace auth
+} // namespace isc
+
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
new file mode 100644
index 0000000..5205624
--- /dev/null
+++ b/src/bin/auth/auth_log.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_LOG__H
+#define __AUTH_LOG__H
+
+#include <log/macros.h>
+#include <auth/auth_messages.h>
+
+namespace isc {
+namespace auth {
+
+/// \brief Auth Logging
+///
+/// Defines the levels used to output debug messages in the "auth" part of
+/// the b10-auth program.  Higher numbers equate to more verbose (and detailed)
+/// output.
+
+// Debug messages indicating normal startup are logged at this debug level.
+const int DBG_AUTH_START = 10;
+
+// Debug level used to log setting information (such as configuration changes).
+const int DBG_AUTH_OPS = 30;
+
+// Trace detailed operations, including errors raised when processing invalid
+// packets.  (These are not logged at severities of WARN or higher for fear
+// that a set of deliberately invalid packets set to the authoritative server
+// could overwhelm the logging.)
+const int DBG_AUTH_DETAIL = 50;
+
+// This level is used to log the contents of packets received and sent.
+const int DBG_AUTH_MESSAGES = 70;
+
+/// Define the logger for the "auth" module part of b10-auth.  We could define
+/// a logger in each file, but we would want to define a common name to avoid
+/// spelling mistakes, so it is just one small step from there to define a
+/// module-common logger.
+extern isc::log::Logger auth_logger;
+
+} // namespace auth
+} // namespace isc
+
+#endif // __AUTH_LOG__H
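
For reference, the logger and debug levels declared in this header are used
elsewhere in this patch along the following lines.  This is a minimal sketch:
the wrapper function is invented for illustration, while LOG_DEBUG,
auth_logger, DBG_AUTH_OPS and AUTH_STATS_TIMER_SET all come from this change
set (the message ID being generated from auth_messages.mes):

    #include <stdint.h>

    #include <auth/auth_log.h>

    namespace isc {
    namespace auth {

    // Log the new statistics timer interval at the "operations" debug level;
    // the value passed to arg() replaces the %1 placeholder in the message
    // text defined in auth_messages.mes.
    void
    logTimerSetting(uint32_t interval) {
        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET).arg(interval);
    }

    } // namespace auth
    } // namespace isc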
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
new file mode 100644
index 0000000..8553d17
--- /dev/null
+++ b/src/bin/auth/auth_messages.mes
@@ -0,0 +1,260 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::auth
+
+% AUTH_AXFR_ERROR error handling AXFR request: %1
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+
+% AUTH_AXFR_UDP AXFR query received over UDP
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+
+% AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+
+% AUTH_CONFIG_CHANNEL_CREATED configuration session channel created
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_STARTED configuration session channel started
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+
+% AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+
+% AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+
+% AUTH_DATA_SOURCE data source database file: %1
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+
+% AUTH_DNS_SERVICES_CREATED DNS services created
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+
+% AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+
+% AUTH_LOAD_TSIG loading TSIG keys
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_LOAD_ZONE loaded zone %1/%2
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+
+% AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+
+% AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+
+% AUTH_NO_STATS_SESSION session interface for statistics is not available
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+
+% AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+
+% AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+
+% AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+
+% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+
+% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+
+% AUTH_PACKET_RECEIVED message received:\n%1
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_PROCESS_FAIL message processing failure: %1
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+
+The server will return a SERVFAIL error code to the sender of the packet.
+However, this message indicates a potential error in the server.
+Please open a bug ticket for this issue.
+
+% AUTH_RECEIVED_COMMAND command '%1' received
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+
+% AUTH_RECEIVED_SENDSTATS command 'sendstats' received
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+
+% AUTH_RESPONSE_RECEIVED received response message, ignoring
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+
+% AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SERVER_CREATED server created
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+
+% AUTH_SERVER_FAILED server failed: %1
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+
+% AUTH_SERVER_STARTED server started
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+
+% AUTH_SQLITE3 nothing to do for loading sqlite3
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+
+% AUTH_STATS_CHANNEL_CREATED STATS session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel.  It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_STATS_COMMS communication error in sending statistics data: %1
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+
+% AUTH_STATS_TIMEOUT timeout while sending statistics data: %1
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+
+% AUTH_STATS_TIMER_DISABLED statistics timer has been disabled
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+
+% AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+
+% AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+
+% AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process.  It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+
+% AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process.  It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+
+% AUTH_ZONEMGR_ERROR received error response from zone manager: %1
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+
+
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 9e01155..f29fd05 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -59,6 +59,7 @@
 #include <auth/auth_srv.h>
 #include <auth/query.h>
 #include <auth/statistics.h>
+#include <auth/auth_log.h>
 
 using namespace std;
 
@@ -104,7 +105,6 @@ public:
 
     /// These members are public because AuthSrv accesses them directly.
     ModuleCCSession* config_session_;
-    bool verbose_mode_;
     AbstractSession* xfrin_session_;
 
     /// In-memory data source.  Currently class IN only for simplicity.
@@ -143,11 +143,11 @@ private:
 
 AuthSrvImpl::AuthSrvImpl(const bool use_cache,
                          AbstractXfroutClient& xfrout_client) :
-    config_session_(NULL), verbose_mode_(false),
+    config_session_(NULL),
     xfrin_session_(NULL),
     memory_datasrc_class_(RRClass::IN()),
     statistics_timer_(io_service_),
-    counters_(verbose_mode_),
+    counters_(),
     keyring_(NULL),
     xfrout_connected_(false),
     xfrout_client_(xfrout_client)
@@ -251,7 +251,7 @@ public:
 
 void
 makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
-                 const Rcode& rcode, const bool verbose_mode,
+                 const Rcode& rcode, 
                  std::auto_ptr<TSIGContext> tsig_context =
                  std::auto_ptr<TSIGContext>())
 {
@@ -289,22 +289,9 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
     } else {
         message->toWire(renderer);
     }
-
-    if (verbose_mode) {
-        cerr << "[b10-auth] sending an error response (" <<
-            renderer.getLength() << " bytes):\n" << message->toText() << endl;
-    }
-}
-}
-
-void
-AuthSrv::setVerbose(const bool on) {
-    impl_->verbose_mode_ = on;
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
+              .arg(renderer.getLength()).arg(message->toText());
 }
-
-bool
-AuthSrv::getVerbose() const {
-    return (impl_->verbose_mode_);
 }
 
 IOService&
@@ -362,15 +349,12 @@ AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
         isc_throw(InvalidParameter,
                   "Memory data source is not supported for RR class "
                   << rrclass);
-    }
-    if (impl_->verbose_mode_) {
-        if (!impl_->memory_datasrc_ && memory_datasrc) {
-            cerr << "[b10-auth] Memory data source is enabled for class "
-                 << rrclass << endl;
-        } else if (impl_->memory_datasrc_ && !memory_datasrc) {
-            cerr << "[b10-auth] Memory data source is disabled for class "
-                 << rrclass << endl;
-        }
+    } else if (!impl_->memory_datasrc_ && memory_datasrc) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
+                  .arg(rrclass);
+    } else if (impl_->memory_datasrc_ && !memory_datasrc) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
+                  .arg(rrclass);
     }
     impl_->memory_datasrc_ = memory_datasrc;
 }
@@ -392,18 +376,13 @@ AuthSrv::setStatisticsTimerInterval(uint32_t interval) {
     }
     if (interval == 0) {
         impl_->statistics_timer_.cancel();
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
     } else {
         impl_->statistics_timer_.setup(boost::bind(&AuthSrv::submitStatistics,
                                                    this),
                                        interval * 1000);
-    }
-    if (impl_->verbose_mode_) {
-        if (interval == 0) {
-            cerr << "[b10-auth] Disabled statistics timer" << endl;
-        } else {
-            cerr << "[b10-auth] Set statistics timer to " << interval
-                 << " seconds" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
+                  .arg(interval);
     }
 }
 
@@ -420,17 +399,13 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
 
         // Ignore all responses.
         if (message->getHeaderFlag(Message::HEADERFLAG_QR)) {
-            if (impl_->verbose_mode_) {
-                cerr << "[b10-auth] received unexpected response, ignoring"
-                     << endl;
-            }
+            LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
             server->resume(false);
             return;
         }
     } catch (const Exception& ex) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] DNS packet exception: " << ex.what() << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_HEADER_PARSE_FAIL)
+                  .arg(ex.what());
         server->resume(false);
         return;
     }
@@ -439,27 +414,21 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
         // Parse the message.
         message->fromWire(request_buffer);
     } catch (const DNSProtocolError& error) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] returning " <<  error.getRcode().toText()
-                 << ": " << error.what() << endl;
-        }
-        makeErrorMessage(message, buffer, error.getRcode(),
-                         impl_->verbose_mode_);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+                  .arg(error.getRcode().toText()).arg(error.what());
+        makeErrorMessage(message, buffer, error.getRcode());
         server->resume(true);
         return;
     } catch (const Exception& ex) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] returning SERVFAIL: " << ex.what() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(),
-                         impl_->verbose_mode_);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+                  .arg(ex.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL());
         server->resume(true);
         return;
     } // other exceptions will be handled at a higher layer.
 
-    if (impl_->verbose_mode_) {
-        cerr << "[b10-auth] received a message:\n" << message->toText() << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_PACKET_RECEIVED)
+              .arg(message->toText());
 
     // Perform further protocol-level validation.
     // TSIG first
@@ -481,20 +450,16 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
 
     bool sendAnswer = true;
     if (tsig_error != TSIGError::NOERROR()) {
-        makeErrorMessage(message, buffer, tsig_error.toRcode(),
-                         impl_->verbose_mode_, tsig_context);
+        makeErrorMessage(message, buffer, tsig_error.toRcode(), tsig_context);
     } else if (message->getOpcode() == Opcode::NOTIFY()) {
         sendAnswer = impl_->processNotify(io_message, message, buffer,
                                           tsig_context);
     } else if (message->getOpcode() != Opcode::QUERY()) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] unsupported opcode" << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::NOTIMP(),
-                         impl_->verbose_mode_, tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE)
+                  .arg(message->getOpcode().toText());
+        makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
     } else if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
-        makeErrorMessage(message, buffer, Rcode::FORMERR(),
-                         impl_->verbose_mode_, tsig_context);
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
     } else {
         ConstQuestionPtr question = *message->beginQuestion();
         const RRType &qtype = question->getType();
@@ -502,8 +467,7 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
             sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
                                                  tsig_context);
         } else if (qtype == RRType::IXFR()) {
-            makeErrorMessage(message, buffer, Rcode::NOTIMP(),
-                             impl_->verbose_mode_, tsig_context);
+            makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
         } else {
             sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
                                                    tsig_context);
@@ -550,11 +514,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
             data_sources_.doQuery(query);
         }
     } catch (const Exception& ex) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] Internal error, returning SERVFAIL: " <<
-                ex.what() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+        LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(ex.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL());
         return (true);
     }
 
@@ -567,12 +528,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
     } else {
         message->toWire(renderer);
     }
-
-    if (verbose_mode_) {
-        cerr << "[b10-auth] sending a response ("
-             << renderer.getLength()
-             << " bytes):\n" << message->toText() << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_NORMAL_RESPONSE)
+              .arg(renderer.getLength()).arg(message->toText());
 
     return (true);
 }
@@ -586,11 +543,8 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
     incCounter(io_message.getSocket().getProtocol());
 
     if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] AXFR query over UDP isn't allowed" << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_UDP);
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
 
@@ -613,12 +567,9 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
             xfrout_connected_ = false;
         }
 
-        if (verbose_mode_) {
-            cerr << "[b10-auth] Error in handling XFR request: " << err.what()
-                 << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+                  .arg(err.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), tsig_context);
         return (true);
     }
 
@@ -633,22 +584,16 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
     // The incoming notify must contain exactly one question for SOA of the
     // zone name.
     if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
-        if (verbose_mode_) {
-                cerr << "[b10-auth] invalid number of questions in notify: "
-                     << message->getRRCount(Message::SECTION_QUESTION) << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_QUESTIONS)
+                  .arg(message->getRRCount(Message::SECTION_QUESTION));
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
     ConstQuestionPtr question = *message->beginQuestion();
     if (question->getType() != RRType::SOA()) {
-        if (verbose_mode_) {
-                cerr << "[b10-auth] invalid question RR type in notify: "
-                     << question->getType() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_RRTYPE)
+                  .arg(question->getType().toText());
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
 
@@ -664,10 +609,7 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
     // silent about such cases, but there doesn't seem to be anything we can
     // improve at the primary server side by sending an error anyway.
     if (xfrin_session_ == NULL) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] "
-                "session interface for xfrin is not available" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
         return (false);
     }
 
@@ -693,16 +635,12 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
         int rcode;
         parsed_answer = parseAnswer(rcode, answer);
         if (rcode != 0) {
-            if (verbose_mode_) {
-                cerr << "[b10-auth] failed to notify Zonemgr: "
-                     << parsed_answer->str() << endl;
-            }
+            LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
+                      .arg(parsed_answer->str());
             return (false);
         }
     } catch (const Exception& ex) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] failed to notify Zonemgr: " << ex.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
         return (false);
     }
 
@@ -762,10 +700,7 @@ AuthSrvImpl::setDbFile(ConstElementPtr config) {
     } else {
         return (answer);
     }
-
-    if (verbose_mode_) {
-        cerr << "[b10-auth] Data source database file: " << db_file_ << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file_);
 
     // create SQL data source
     // Note: the following step is tricky to be exception-safe and to ensure
@@ -795,9 +730,7 @@ AuthSrv::updateConfig(ConstElementPtr new_config) {
         }
         return (impl_->setDbFile(new_config));
     } catch (const isc::Exception& error) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] error: " << error.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_CONFIG_UPDATE_FAIL).arg(error.what());
         return (isc::config::createAnswer(1, error.what()));
     }
 }
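
The pattern used throughout this conversion is worth spelling out: each old "if (verbose_mode_) { cerr << ... }" block becomes a single logging macro naming the logger, an optional debug level, and a message ID, with runtime values attached through .arg(). A minimal sketch of the two forms seen above (it assumes the BIND 10 source tree, since auth_log.h and the AUTH_* message IDs are introduced on this branch; the reason string is illustrative only):

    #include <string>

    #include <auth/auth_log.h>

    using namespace isc::auth;

    void
    reportFailure(const std::string& reason) {
        // LOG_ERROR takes (logger, message-ID) and is always emitted at
        // ERROR severity.
        LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(reason);

        // LOG_DEBUG additionally takes a debug level; the message only
        // appears when the logger's debug level is at least DBG_AUTH_DETAIL.
        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
                  .arg(reason);
    }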
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 19c97b5..7eede97 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -124,27 +124,6 @@ public:
                         isc::util::OutputBufferPtr buffer,
                         isc::asiodns::DNSServer* server);
 
-    /// \brief Set verbose flag
-    ///
-    /// \param on The new value of the verbose flag
-
-    /// \brief Enable or disable verbose logging.
-    ///
-    /// This method never throws an exception.
-    ///
-    /// \param on \c true to enable verbose logging; \c false to disable
-    /// verbose logging.
-    void setVerbose(const bool on);
-
-    /// \brief Returns the logging verbosity of the \c AuthSrv object.
-    ///
-    /// This method never throws an exception.
-    ///
-    /// \return \c true if verbose logging is enabled; otherwise \c false.
-
-    /// \brief Get the current value of the verbose flag
-    bool getVerbose() const;
-
     /// \brief Updates the data source for the \c AuthSrv object.
     ///
     /// This method installs or replaces the data source that the \c AuthSrv
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 77d171f..cf3fe4a 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -12,6 +12,9 @@ query_bench_SOURCES += ../query.h  ../query.cc
 query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
 query_bench_SOURCES += ../auth_config.h ../auth_config.cc
 query_bench_SOURCES += ../statistics.h ../statistics.cc
+query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+
+nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
 
 query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
 query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index eafcae8..fe3d729 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -27,16 +27,18 @@
 
 #include <config/ccsession.h>
 
+#include <auth/auth_log.h>
 #include <auth/auth_srv.h>
 #include <auth/command.h>
 
-using namespace std;
-using boost::shared_ptr;
 using boost::scoped_ptr;
-using namespace isc::dns;
+using boost::shared_ptr;
+using namespace isc::auth;
+using namespace isc::config;
 using namespace isc::data;
 using namespace isc::datasrc;
-using namespace isc::config;
+using namespace isc::dns;
+using namespace std;
 
 namespace {
 /// An exception that is thrown if an error occurs while handling a command
@@ -115,9 +117,7 @@ public:
 class SendStatsCommand : public AuthCommand {
 public:
     virtual void exec(AuthSrv& server, isc::data::ConstElementPtr) {
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] command 'sendstats' received" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_SENDSTATS);
         server.submitStatistics();
     }
 };
@@ -140,11 +140,8 @@ public:
                                                       oldzone->getOrigin()));
         newzone->load(oldzone->getFileName());
         oldzone->swap(*newzone);
-
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] Loaded zone '" << newzone->getOrigin()
-                 << "'/" << newzone->getClass() << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
+                  .arg(newzone->getOrigin()).arg(newzone->getClass());
     }
 
 private:
@@ -164,10 +161,7 @@ private:
         ConstElementPtr datasrc_elem = args->get("datasrc");
         if (datasrc_elem) {
             if (datasrc_elem->stringValue() == "sqlite3") {
-                if (server.getVerbose()) {
-                    cerr << "[b10-auth] Nothing to do for loading sqlite3"
-                         << endl;
-                }
+                LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_SQLITE3);
                 return (false);
             } else if (datasrc_elem->stringValue() != "memory") {
                 // (note: at this point it's guaranteed that datasrc_elem
@@ -233,18 +227,13 @@ ConstElementPtr
 execAuthServerCommand(AuthSrv& server, const string& command_id,
                       ConstElementPtr args)
 {
-    if (server.getVerbose()) {
-        cerr << "[b10-auth] Received '" << command_id << "' command" << endl;
-    }
-
+    LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_COMMAND).arg(command_id);
     try {
         scoped_ptr<AuthCommand>(createAuthCommand(command_id))->exec(server,
                                                                      args);
     } catch (const isc::Exception& ex) {
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] Command '" << command_id
-                 << "' execution failed: " << ex.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_COMMAND_FAILED).arg(command_id)
+                                                   .arg(ex.what());
         return (createAnswer(1, ex.what()));
     }
 
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 36c616e..c8f6762 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,26 +44,26 @@
 #include <auth/command.h>
 #include <auth/change_user.h>
 #include <auth/auth_srv.h>
+#include <auth/auth_log.h>
 #include <asiodns/asiodns.h>
 #include <asiolink/asiolink.h>
-#include <log/dummylog.h>
 #include <log/logger_support.h>
 #include <server_common/keyring.h>
 
 using namespace std;
-using namespace isc::data;
+using namespace isc::asiodns;
+using namespace isc::asiolink;
+using namespace isc::auth;
 using namespace isc::cc;
 using namespace isc::config;
+using namespace isc::data;
 using namespace isc::dns;
+using namespace isc::log;
 using namespace isc::util;
 using namespace isc::xfr;
-using namespace isc::asiolink;
-using namespace isc::asiodns;
 
 namespace {
 
-bool verbose_mode = false;
-
 /* need global var for config/command handlers.
  * todo: turn this around, and put handlers in the authserver
  * class itself? */
@@ -89,6 +89,7 @@ usage() {
     cerr << "\t-v: verbose output" << endl;
     exit(1);
 }
+
 } // end of anonymous namespace
 
 int
@@ -96,6 +97,7 @@ main(int argc, char* argv[]) {
     int ch;
     const char* uid = NULL;
     bool cache = true;
+    bool verbose = false;
 
     while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
         switch (ch) {
@@ -106,8 +108,7 @@ main(int argc, char* argv[]) {
             uid = optarg;
             break;
         case 'v':
-            verbose_mode = true;
-            isc::log::denabled = true;
+            verbose = true;
             break;
         case '?':
         default:
@@ -121,7 +122,7 @@ main(int argc, char* argv[]) {
 
     // Initialize logging.  If verbose, we'll use maximum verbosity.
     isc::log::initLogger("b10-auth",
-                         (verbose_mode ? isc::log::DEBUG : isc::log::INFO),
+                         (verbose ? isc::log::DEBUG : isc::log::INFO),
                          isc::log::MAX_DEBUG_LEVEL, NULL);
 
     int ret = 0;
@@ -144,8 +145,7 @@ main(int argc, char* argv[]) {
         }
 
         auth_server = new AuthSrv(cache, xfrout_client);
-        auth_server->setVerbose(verbose_mode);
-        cout << "[b10-auth] Server created." << endl;
+        LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
 
         SimpleCallback* checkin = auth_server->getCheckinProvider();
         IOService& io_service = auth_server->getIOService();
@@ -154,10 +154,10 @@ main(int argc, char* argv[]) {
 
         DNSService dns_service(io_service, checkin, lookup, answer);
         auth_server->setDNSService(dns_service);
-        cout << "[b10-auth] DNSServices created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
 
         cc_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Configuration session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
 
         // We delay starting listening to new commands/config just before we
         // go into the main loop to avoid confusion due to mixture of
@@ -167,19 +167,19 @@ main(int argc, char* argv[]) {
         config_session = new ModuleCCSession(specfile, *cc_session,
                                              my_config_handler,
                                              my_command_handler, false);
-        cout << "[b10-auth] Configuration channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
 
         xfrin_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Xfrin session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
         xfrin_session->establish(NULL);
         xfrin_session_established = true;
-        cout << "[b10-auth] Xfrin session channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
 
         statistics_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Statistics session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_CREATED);
         statistics_session->establish(NULL);
         statistics_session_established = true;
-        cout << "[b10-auth] Statistics session channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_ESTABLISHED);
 
         auth_server->setXfrinSession(xfrin_session);
         auth_server->setStatisticsSession(statistics_session);
@@ -188,33 +188,34 @@ main(int argc, char* argv[]) {
         // all initial configurations, but as a short term workaround we
         // handle the traditional "database_file" setup by directly calling
         // updateConfig().
-        // if server load configure failed, we won't exit, give user second chance
-        // to correct the configure.
+        // If loading the server configuration fails we do not exit; the user
+        // gets a second chance to correct the configuration.
         auth_server->setConfigSession(config_session);
         try {
             configureAuthServer(*auth_server, config_session->getFullConfig());
             auth_server->updateConfig(ElementPtr());
         } catch (const AuthConfigError& ex) {
-            cout << "[bin10-auth] Server load config failed:" << ex.what() << endl;
+            LOG_ERROR(auth_logger, AUTH_CONFIG_LOAD_FAIL).arg(ex.what());
         }
 
         if (uid != NULL) {
             changeUser(uid);
         }
 
-        cout << "[b10-auth] Loading TSIG keys" << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_LOAD_TSIG);
         isc::server_common::initKeyring(*config_session);
         auth_server->setTSIGKeyRing(&isc::server_common::keyring);
 
         // Now start asynchronous read.
         config_session->start();
-        cout << "[b10-auth] Configuration channel started." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_STARTED);
 
-        cout << "[b10-auth] Server started." << endl;
+        // Successfully initialized.
+        LOG_INFO(auth_logger, AUTH_SERVER_STARTED);
         io_service.run();
 
     } catch (const std::exception& ex) {
-        cerr << "[b10-auth] Server failed: " << ex.what() << endl;
+        LOG_FATAL(auth_logger, AUTH_SERVER_FAILED).arg(ex.what());
         ret = 1;
     }
 
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 415aa14..76e5007 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -13,6 +13,7 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <auth/statistics.h>
+#include <auth/auth_log.h>
 
 #include <cc/data.h>
 #include <cc/session.h>
@@ -20,6 +21,8 @@
 #include <sstream>
 #include <iostream>
 
+using namespace isc::auth;
+
 // TODO: We need a namespace ("auth_server"?) to hold
 // AuthSrv and AuthCounters.
 
@@ -29,10 +32,7 @@ private:
     AuthCountersImpl(const AuthCountersImpl& source);
     AuthCountersImpl& operator=(const AuthCountersImpl& source);
 public:
-    // References verbose_mode flag in AuthSrvImpl
-    // TODO: Fix this short term workaround for logging
-    // after we have logging framework
-    AuthCountersImpl(const bool& verbose_mode);
+    AuthCountersImpl();
     ~AuthCountersImpl();
     void inc(const AuthCounters::CounterType type);
     bool submitStatistics() const;
@@ -42,15 +42,13 @@ public:
 private:
     std::vector<uint64_t> counters_;
     isc::cc::AbstractSession* statistics_session_;
-    const bool& verbose_mode_;
 };
 
-AuthCountersImpl::AuthCountersImpl(const bool& verbose_mode) :
+AuthCountersImpl::AuthCountersImpl() :
     // initialize counter
     // size: AuthCounters::COUNTER_TYPES, initial value: 0
     counters_(AuthCounters::COUNTER_TYPES, 0),
-    statistics_session_(NULL),
-    verbose_mode_(verbose_mode)
+    statistics_session_(NULL)
 {}
 
 AuthCountersImpl::~AuthCountersImpl()
@@ -64,11 +62,7 @@ AuthCountersImpl::inc(const AuthCounters::CounterType type) {
 bool
 AuthCountersImpl::submitStatistics() const {
     if (statistics_session_ == NULL) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "session interface for statistics"
-                      << " is not available" << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_NO_STATS_SESSION);
         return (false);
     }
     std::stringstream statistics_string;
@@ -95,18 +89,10 @@ AuthCountersImpl::submitStatistics() const {
         // currently it just returns empty message
         statistics_session_->group_recvmsg(env, answer, false, seq);
     } catch (const isc::cc::SessionError& ex) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "communication error in sending statistics data: "
-                      << ex.what() << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_STATS_COMMS).arg(ex.what());
         return (false);
     } catch (const isc::cc::SessionTimeout& ex) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "timeout happened while sending statistics data: "
-                      << ex.what() << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_STATS_TIMEOUT).arg(ex.what());
         return (false);
     }
     return (true);
@@ -125,8 +111,7 @@ AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
     return (counters_.at(type));
 }
 
-AuthCounters::AuthCounters(const bool& verbose_mode) :
-    impl_(new AuthCountersImpl(verbose_mode))
+AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
 {}
 
 AuthCounters::~AuthCounters() {
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 9e5240e..5bf6436 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -61,15 +61,10 @@ public:
     };
     /// The constructor.
     ///
-    /// \param verbose_mode reference to verbose_mode_ of AuthSrvImpl
-    ///
     /// This constructor is mostly exception free. But it may still throw
     /// a standard exception if memory allocation fails inside the method.
     ///
-    /// \todo Fix this short term workaround for logging
-    /// after we have logging framework.
-    ///
-    AuthCounters(const bool& verbose_mode);
+    AuthCounters();
     /// The destructor.
     ///
     /// This method never throws an exception.
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index a4620f5..71520c2 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -22,6 +22,7 @@ TESTS += run_unittests
 run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
 run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
 run_unittests_SOURCES += ../auth_srv.h ../auth_srv.cc
+run_unittests_SOURCES += ../auth_log.h ../auth_log.cc
 run_unittests_SOURCES += ../query.h ../query.cc
 run_unittests_SOURCES += ../change_user.h ../change_user.cc
 run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
@@ -36,6 +37,9 @@ run_unittests_SOURCES += query_unittest.cc
 run_unittests_SOURCES += change_user_unittest.cc
 run_unittests_SOURCES += statistics_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
+
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 run_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index d922901..2b20d65 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -190,15 +190,6 @@ TEST_F(AuthSrvTest, unsupportedRequest) {
     unsupportedRequest();
 }
 
-// Simple API check
-TEST_F(AuthSrvTest, verbose) {
-    EXPECT_FALSE(server.getVerbose());
-    server.setVerbose(true);
-    EXPECT_TRUE(server.getVerbose());
-    server.setVerbose(false);
-    EXPECT_FALSE(server.getVerbose());
-}
-
 // Multiple questions.  Should result in FORMERR.
 TEST_F(AuthSrvTest, multiQuestion) {
     multiQuestion();
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 062b70d..9a3dded 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -69,13 +69,12 @@ private:
     };
 
 protected:
-    AuthCountersTest() : verbose_mode_(false), counters(verbose_mode_) {
+    AuthCountersTest() : counters() {
         counters.setStatisticsSession(&statistics_session_);
     }
     ~AuthCountersTest() {
     }
     MockSession statistics_session_;
-    bool verbose_mode_;
     AuthCounters counters;
 };
 
diff --git a/src/bin/cfgmgr/plugins/b10logging.py b/src/bin/cfgmgr/plugins/b10logging.py
index 6af3f66..e288c6d 100644
--- a/src/bin/cfgmgr/plugins/b10logging.py
+++ b/src/bin/cfgmgr/plugins/b10logging.py
@@ -48,6 +48,19 @@ def check(config):
         for logger in config['loggers']:
             # name should always be present
             name = logger['name']
+            # Report an error if '*' is used other than as the complete
+            # first-level name, i.e. only '*' or '*.<subsystem>' are allowed.
+            # TODO: we might want to also warn or error if the
+            # logger name is not an existing module, but we can't
+            # really tell that from here at this point
+            star_pos = name.find('*')
+            if star_pos > 0 or\
+               name == '*.' or\
+               (star_pos == 0 and len(name) > 1 and name[1] != '.'):
+                errors.append("Bad logger name: '" + name + "': * can "
+                              "only be used instead of the full "
+                              "first-level name, e.g. '*' or "
+                              "'*.subsystem'")
 
             if 'severity' in logger and\
                logger['severity'].lower() not in ALLOWED_SEVERITIES:
@@ -71,11 +84,11 @@ def check(config):
                                'output' in output_option and\
                                output_option['output'] not in ALLOWED_STREAMS:
                                 errors.append("bad output for logger " + name +
-                                              ": " + output_option['stream'] +
+                                              ": " + output_option['output'] +
                                               ", must be stdout or stderr")
                             elif destination == "file" and\
-                                 'output' not in output_option or\
-                                 output_option['output'] == "":
+                                 ('output' not in output_option or\
+                                  output_option['output'] == ""):
                                     errors.append("destination set to file but "
                                                   "output not set to any "
                                                   "filename for logger "
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 725d391..07b7a85 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -1,5 +1,5 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = tsig_keys_test.py
+PYTESTS = tsig_keys_test.py logging_test.py
 
 EXTRA_DIST = $(PYTESTS)
 
diff --git a/src/bin/cfgmgr/plugins/tests/logging_test.py b/src/bin/cfgmgr/plugins/tests/logging_test.py
new file mode 100644
index 0000000..818a596
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/logging_test.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Make sure we can load the module, put it into path
+import sys
+import os
+sys.path.extend(os.environ["B10_TEST_PLUGIN_DIR"].split(':'))
+
+import b10logging
+import unittest
+
+class LoggingConfCheckTest(unittest.TestCase):
+    def test_load(self):
+        """
+        Checks the entry point returns the correct values.
+        """
+        (spec, check) = b10logging.load()
+        # It returns the checking function
+        self.assertEqual(check, b10logging.check)
+        # The plugin stores its spec
+        self.assertEqual(spec, b10logging.spec)
+
+    def test_logger_conf(self):
+        self.assertEqual(None,
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'DEBUG',
+                                            'debuglevel': 50,
+                                            'output_options':
+                                            [{'destination': 'file',
+                                              'output': '/some/file'
+                                            }]
+                                           },
+                                           {'name': 'b10-resolver',
+                                            'severity': 'WARN',
+                                            'additive': True,
+                                            'output_options':
+                                            [{'destination': 'console',
+                                              'output': 'stderr',
+                                              'flush': True
+                                            }]
+                                           },
+                                           {'name': 'b10-resolver.resolver',
+                                            'severity': 'ERROR',
+                                            'output_options': []
+                                           },
+                                           {'name': '*.cache',
+                                            'severity': 'INFO'
+                                           }
+                                          ]}))
+    def do_bad_name_test(self, name):
+        err_str = "Bad logger name: '" + name + "': * can only be "\
+                  "used instead of the full first-level name, e.g. "\
+                  "'*' or '*.subsystem'"
+        self.assertEqual(err_str,
+                         b10logging.check({'loggers':
+                                          [{'name': name,
+                                            'severity': 'DEBUG'},
+                                          ]}))
+
+    def test_logger_bad_name(self):
+        self.do_bad_name_test("*.")
+        self.do_bad_name_test("*foo")
+        self.do_bad_name_test("*foo.lib")
+        self.do_bad_name_test("foo*")
+        self.do_bad_name_test("foo*.lib")
+
+    def test_logger_bad_severity(self):
+        self.assertEqual('bad severity value for logger *: BADVAL',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'BADVAL'}]}))
+
+    def test_logger_bad_destination(self):
+        self.assertEqual('bad destination for logger *: baddest',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'baddest' }
+                                            ]}]}))
+
+    def test_logger_bad_console_output(self):
+        self.assertEqual('bad output for logger *: bad_output, must be stdout or stderr',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'console',
+                                              'output': 'bad_output'
+                                            }
+                                            ]}]}))
+
+    def test_logger_bad_file_output(self):
+        self.assertEqual('destination set to file but output not set to any filename for logger *',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'file' }
+                                            ]}]}))
+
+    def test_logger_bad_syslog_output(self):
+        self.assertEqual('destination set to syslog but output not set to any facility for logger *',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'syslog' }
+                                            ]}]}))
+
+    def test_logger_bad_type(self):
+        self.assertEqual('123 should be a string',
+                         b10logging.check({'loggers':
+                                          [{'name': 123,
+                                            'severity': 'INFO'}]}))
+        self.assertEqual('123 should be a string',
+                         b10logging.check({'loggers':
+                                          [{'name': 'bind10',
+                                            'severity': 123}]}))
+
+if __name__ == '__main__':
+        unittest.main()
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index 8a29949..0af9be6 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,12 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
 
 b10_xfrindir = $(pkgdatadir)
 b10_xfrin_DATA = xfrin.spec
+pyexec_DATA = xfrin_messages.py
 
-CLEANFILES = b10-xfrin xfrin.pyc 
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
 
 man_MANS = b10-xfrin.8
 EXTRA_DIST = $(man_MANS) b10-xfrin.xml
-EXTRA_DIST += xfrin.spec
+EXTRA_DIST += xfrin.spec xfrin_messages.mes
 
 if ENABLE_MAN
 
@@ -20,8 +21,12 @@ b10-xfrin.8: b10-xfrin.xml
 
 endif
 
+# Define rule to build logging source files from message file
+xfrin_messages.py: xfrin_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py
+b10-xfrin: xfrin.py xfrin_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
 	chmod a+x $@
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index a9ca0f2..64e3563 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -29,12 +29,17 @@ from isc.config.ccsession import *
 from isc.notify import notify_out
 import isc.util.process
 import isc.net.parse
+from xfrin_messages import *
+
+isc.log.init("b10-xfrin")
+logger = isc.log.Logger("xfrin")
+
 try:
     from pydnspp import *
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrin process
     # must keep running, so we warn about it and move forward.
-    sys.stderr.write('[b10-xfrin] failed to import DNS module: %s\n' % str(e))
+    logger.error(XFRIN_IMPORT_DNS, str(e))
 
 isc.util.process.rename()
 
@@ -69,9 +74,6 @@ __version__ = 'BIND10'
 XFRIN_OK = 0
 XFRIN_FAIL = 1
 
-def log_error(msg):
-    sys.stderr.write("[b10-xfrin] %s\n" % str(msg))
-
 class XfrinException(Exception):
     pass
 
@@ -150,8 +152,7 @@ class XfrinConnection(asyncore.dispatcher):
             self.connect(self._master_address)
             return True
         except socket.error as e:
-            self.log_msg('Failed to connect:(%s), %s' % (self._master_address,
-                                                            str(e)))
+            logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
             return False
 
     def _create_query(self, query_type):
@@ -264,31 +265,27 @@ class XfrinConnection(asyncore.dispatcher):
                 logstr = 'SOA check for \'%s\' ' % self._zone_name
                 ret =  self._check_soa_serial()
 
-            logstr = 'transfer of \'%s\': AXFR ' % self._zone_name
             if ret == XFRIN_OK:
-                self.log_msg(logstr + 'started')
+                logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
                 self._send_query(RRType.AXFR())
                 isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
                                             self._handle_xfrin_response)
 
-                self.log_msg(logstr + 'succeeded')
+                logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
 
         except XfrinException as e:
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
             #TODO, recover data source.
         except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
         except UserWarning as e:
             # XXX: this is an exception from our C++ library via the
             # Boost.Python binding.  It would be better to have more more
             # specific exceptions, but at this moment this is the finest
             # granularity.
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
         finally:
            self.close()
@@ -395,11 +392,6 @@ class XfrinConnection(asyncore.dispatcher):
         # Overwrite the log function, log nothing
         pass
 
-    def log_msg(self, msg):
-        if self._verbose:
-            sys.stdout.write('[b10-xfrin] %s\n' % str(msg))
-
-
 def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
                   shutdown_event, master_addrinfo, check_soa, verbose,
                   tsig_key):
@@ -481,8 +473,8 @@ class ZoneInfo:
             try:
                 self.master_addr = isc.net.parse.addr_parse(master_addr_str)
             except ValueError:
+                logger.error(XFRIN_BAD_MASTER_ADDR_FORMAT, master_addr_str)
                 errmsg = "bad format for zone's master: " + master_addr_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_master_port(self, master_port_str):
@@ -496,8 +488,8 @@ class ZoneInfo:
             try:
                 self.master_port = isc.net.parse.port_parse(master_port_str)
             except ValueError:
+                logger.error(XFRIN_BAD_MASTER_PORT_FORMAT, master_port_str)
                 errmsg = "bad format for zone's master port: " + master_port_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_zone_class(self, zone_class_str):
@@ -514,8 +506,8 @@ class ZoneInfo:
             try:
                 self.rrclass = RRClass(zone_class_str)
             except InvalidRRClass:
+                logger.error(XFRIN_BAD_ZONE_CLASS, zone_class_str)
                 errmsg = "invalid zone class: " + zone_class_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_tsig_key(self, tsig_key_str):
@@ -529,8 +521,8 @@ class ZoneInfo:
             try:
                 self.tsig_key = TSIGKey(tsig_key_str)
             except InvalidParameter as ipe:
+                logger.error(XFRIN_BAD_TSIG_KEY_STRING, tsig_key_str)
                 errmsg = "bad TSIG key string: " + tsig_key_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def get_master_addr_info(self):
@@ -556,7 +548,8 @@ class Xfrin:
         self._send_cc_session = isc.cc.Session()
         self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
                                               self.config_handler,
-                                              self.command_handler)
+                                              self.command_handler,
+                                              None, True)
         self._module_cc.start()
         config_data = self._module_cc.get_full_config()
         self.config_handler(config_data)
@@ -635,7 +628,7 @@ class Xfrin:
                 if zone_info is None:
                     # TODO what to do? no info known about zone. defaults?
                     errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
-                    log_error(errmsg)
+                    logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
                     answer = create_answer(1, errmsg)
                 else:
                     master_addr = zone_info.get_master_addr_info()
@@ -670,7 +663,7 @@ class Xfrin:
             else:
                 answer = create_answer(1, 'unknown command: ' + command)
         except XfrinException as err:
-            log_error('error happened for command: %s, %s' % (command, str(err)) )
+            logger.error(XFRIN_COMMAND_ERROR, command, str(err))
             answer = create_answer(1, str(err))
         return answer
 
@@ -762,8 +755,7 @@ class Xfrin:
                 except isc.cc.session.SessionTimeout:
                     pass        # for now we just ignore the failure
             except socket.error as err:
-                log_error("Fail to send message to %s and %s, msgq may has been killed"
-                          % (XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME))
+                logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME)
         else:
             msg = create_command(ZONE_XFRIN_FAILED, param)
             # catch the exception, in case msgq has been killed.
@@ -775,8 +767,7 @@ class Xfrin:
                 except isc.cc.session.SessionTimeout:
                     pass        # for now we just ignore the failure
             except socket.error as err:
-                log_error("Fail to send message to %s, msgq may has been killed"
-                          % ZONE_MANAGER_MODULE_NAME)
+                logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
 
     def startup(self):
         while not self._shutdown_event.is_set():
@@ -844,12 +835,11 @@ def main(xfrin_class, use_signal = True):
         xfrind = xfrin_class(verbose = options.verbose)
         xfrind.startup()
     except KeyboardInterrupt:
-        log_error("exit b10-xfrin")
+        logger.info(XFRIN_STOPPED_BY_KEYBOARD)
     except isc.cc.session.SessionError as e:
-        log_error(str(e))
-        log_error('Error happened! is the command channel daemon running?')
+        logger.error(XFRIN_CC_SESSION_ERROR, str(e))
     except Exception as e:
-        log_error(str(e))
+        logger.error(XFRIN_UNKNOWN_ERROR, str(e))
 
     if xfrind:
         xfrind.shutdown()
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
new file mode 100644
index 0000000..80a0be3
--- /dev/null
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -0,0 +1,91 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+
+% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+
+% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
+The AXFR transfer of the given zone was successfully completed.
+
+% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
+The given master address is not a valid IP address.
+
+% XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1
+The master port as read from the configuration is not a valid port number.
+
+% XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFRIN_BAD_ZONE_CLASS Invalid zone class: %1
+The zone class as read from the configuration is not a valid DNS class.
+
+% XFRIN_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFRIN_COMMAND_ERROR error while executing command '%1': %2
+There was an error while the given command was being processed. The
+error is given in the log message.
+
+% XFRIN_CONNECT_MASTER error connecting to master at %1: %2
+There was an error opening a connection to the master. The error is
+shown in the log message.
+
+% XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+
+% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+
+% XFRIN_IMPORT_DNS error importing python DNS module: %1
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+
+% XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+
+% XFRIN_STARTING starting xfrin with command line '%1'
+An informational message output when the xfrin daemon starts up.
+
+% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+
+% XFRIN_UNKNOWN_ERROR unknown error: %1
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
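
Each '% ID text' entry above becomes a symbolic constant in the generated xfrin_messages module, and a call site passes that constant followed by one positional argument per %N placeholder. A minimal sketch of the pattern used throughout xfrin.py.in (it assumes a BIND 10 runtime where isc.log and the generated xfrin_messages module are importable; the zone name and error text are illustrative only):

    import isc.log
    from xfrin_messages import *   # generated from xfrin_messages.mes

    isc.log.init("b10-xfrin")
    logger = isc.log.Logger("xfrin")

    # XFRIN_AXFR_TRANSFER_FAILURE has two placeholders (%1 = zone, %2 = error),
    # so two arguments follow the message ID.
    logger.error(XFRIN_AXFR_TRANSFER_FAILURE, "example.com", "connection reset")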
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index 890851e..defaf13 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -7,11 +7,15 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
 
 # The core library
 lib_LTLIBRARIES = libacl.la
-libacl_la_SOURCES = check.h acl.h
+libacl_la_SOURCES  = acl.h
+libacl_la_SOURCES += check.h
+libacl_la_SOURCES += ip_check.h ip_check.cc
+libacl_la_SOURCES += logic_check.h
 libacl_la_SOURCES += loader.h loader.cc
 
 libacl_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 libacl_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
 
 # DNS specialized one
 lib_LTLIBRARIES += libdnsacl.la
diff --git a/src/lib/acl/ip_check.cc b/src/lib/acl/ip_check.cc
new file mode 100644
index 0000000..08c8431
--- /dev/null
+++ b/src/lib/acl/ip_check.cc
@@ -0,0 +1,111 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/lexical_cast.hpp>
+
+#include <acl/ip_check.h>
+
+using namespace std;
+
+// Split the IP Address prefix
+
+namespace isc {
+namespace acl {
+namespace internal {
+
+uint8_t
+createMask(size_t prefixlen) {
+
+    if (prefixlen == 0) {
+        return (0);
+
+    } else if (prefixlen <= 8) {
+
+        // In the following discussion:
+        //
+        // w is the width of the data type in bits.
+        // m is the value of prefixlen, the number of most significant bits we
+        // want to set.
+        // ** is exponentiation (i.e. 2**n is 2 raised to the power of n).
+        //
+        // We note that the value of 2**m - 1 gives a value with the least
+        // significant m bits set.  For a data type of width w, this means that
+        // the most significant (w-m) bits are clear.
+        //
+        // Hence the value 2**(w-m) - 1 gives a result with the least significant
+        // w-m bits set and the most significant m bits clear.  The 1's
+        // complement of this value gives the result we want.
+        //
+        // Final note: at this point in the logic, m is non-zero, so w-m < w.
+        // This means 1<<(w-m) will fit into a variable of width w bits.  In
+        // other words, in the expression below, no term will cause an integer
+        // overflow.
+        return (~((1 << (8 - prefixlen)) - 1));
+    }
+
+    // Mask size is too large. (Note that prefixlen is unsigned, so can't be
+    // negative.)
+    isc_throw(isc::OutOfRange, "prefixlen argument must be between 0 and 8");
+}
+
+pair<string, int>
+splitIPAddress(const string& ipprefix) {
+
+    // Split string into its components - an address and a prefix length.
+    // We initialize by assuming that there is no slash in the string given.
+    string address = ipprefix;
+    string prefixlen = "";
+
+    const size_t slashpos = ipprefix.find('/');
+    if ((ipprefix.size() == 0) || (slashpos == 0) ||
+        (slashpos == (ipprefix.size() - 1))) {
+        // Nothing in prefix, or it starts with or ends with a slash.
+        isc_throw(isc::InvalidParameter, "address prefix of " << ipprefix <<
+                                         " is not valid");
+
+    } else if (slashpos != string::npos) {
+        // There is a slash somewhere in the string, split the string on it.
+        // Don't worry about multiple slashes - if there are some, they will
+        // appear in the prefixlen segment and will be detected when an attempt
+        // is made to convert it to a number.
+        address = ipprefix.substr(0, slashpos);
+        prefixlen = ipprefix.substr(slashpos + 1);
+    }
+
+    // Set the default value for the prefix length.  As the type of the address
+    // is not known at the point this function is called, the maximum
+    // allowable value is also not known.  The value of 0 is reserved for
+    // a "match any address" match.
+    int prefix_size = -1;
+
+    // If there is a prefixlength, attempt to convert it.
+    if (!prefixlen.empty()) {
+        try {
+            prefix_size = boost::lexical_cast<int>(prefixlen);
+            if (prefix_size < 0) {
+                isc_throw(isc::InvalidParameter, "address prefix of " <<
+                          ipprefix << " is not valid");
+            }
+        } catch (boost::bad_lexical_cast&) {
+            isc_throw(isc::InvalidParameter, "prefix length of '" <<
+                      prefixlen << "' is not valid");
+        }
+    }
+
+    return (make_pair(address, prefix_size));
+}
+
+} // namespace internal
+} // namespace acl
+} // namespace isc
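
To make the bit arithmetic in createMask() concrete, the following standalone program evaluates the same expression for every legal prefix length; it deliberately repeats the expression instead of linking against libacl so it can be compiled on its own:

    #include <cstdio>
    #include <stdint.h>

    int main() {
        // Same expression as createMask(): set the most significant m bits
        // of an 8-bit value; m == 0 is the special all-clear case.
        for (int m = 0; m <= 8; ++m) {
            const uint8_t mask =
                (m == 0) ? 0 : static_cast<uint8_t>(~((1 << (8 - m)) - 1));
            std::printf("prefixlen %d -> 0x%02X\n", m, static_cast<unsigned>(mask));
        }
        return (0);
    }

prefixlen 3 prints 0xE0 (binary 11100000), matching the example in the ip_check.h documentation below. For splitIPAddress(), "192.0.2.0/24" yields the pair ("192.0.2.0", 24), while a bare "192.0.2.1" yields ("192.0.2.1", -1), the caller later supplying the family-specific maximum.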
diff --git a/src/lib/acl/ip_check.h b/src/lib/acl/ip_check.h
new file mode 100644
index 0000000..5bc70fc
--- /dev/null
+++ b/src/lib/acl/ip_check.h
@@ -0,0 +1,354 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IP_CHECK_H
+#define __IP_CHECK_H
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include <boost/static_assert.hpp>
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sys/socket.h> // for AF_INET/AF_INET6
+#include <netinet/in.h>
+
+#include <acl/check.h>
+#include <exceptions/exceptions.h>
+#include <util/strutil.h>
+
+namespace isc {
+namespace acl {
+
+// Free functions.  These are not supposed to be used outside this module,
+// but are declared public for testing.  To try to conceal them, they are
+// put in an "internal" namespace.
+
+namespace internal {
+
+/// \brief Convert prefix length to mask
+///
+/// Given a prefix length and a data type, return a value of that data type
+/// with the most significant "prefix length" bits set.  For example, if the
+/// data type is a uint8_t and the prefix length is 3, the function would
+/// return a uint8_t holding the binary value 11100000.  This value is used as
+/// a mask in the address checks.
+///
+/// \param prefixlen number of bits to be set in the mask.  This must be
+///        between 0 and 8.
+///
+/// \return uint8_t with the most significant "prefixlen" bits set.
+///
+/// \exception OutOfRange prefixlen is too large for the data type.
+
+uint8_t createMask(size_t prefixlen);
+
+/// \brief Split IP Address Prefix
+///
+/// Splits an IP address prefix (given in the form "xxxxxx/n" or "xxxxx") into
+/// a string representing the IP address and a number giving the length of the
+/// prefix. (In the latter case, the prefix is taken to be equal to the width
+/// in bits of the data type holding the address.) An exception will be
+/// thrown if the string format is invalid or if the prefix length is invalid.
+///
+/// N.B. This function does NOT check that the address component is a valid IP
+/// address; this is done elsewhere in the address parsing process.
+///
+/// \param ipprefix Address or address prefix.  The string should be passed
+///                 without leading or trailing spaces.
+///
+/// \return Pair of (string, int) holding the address string and the prefix
+///         length.  The second element is -1 if no prefix was given.
+///
+/// \exception InvalidParameter Address prefix not of the expected syntax
+
+std::pair<std::string, int>
+splitIPAddress(const std::string& ipprefix);
+
+} // namespace internal
+
+
+
+/// \brief IP Check
+///
+/// This class performs a match between an IP address prefix specified in an ACL
+/// and a given IP address.  The check works for both IPv4 and IPv6 addresses.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+
+template <typename Context>
+class IPCheck : public Check<Context> {
+private:
+    // Size of uint8_t array needed to hold different address types
+    static const size_t IPV6_SIZE = sizeof(struct in6_addr);
+    static const size_t IPV4_SIZE = sizeof(struct in_addr);
+
+    // Confirm our assumption of relative sizes - this allows us to assume that
+    // an array sized for an IPv6 address can hold an IPv4 address.
+    BOOST_STATIC_ASSERT(sizeof(struct in6_addr) > sizeof(struct in_addr));
+
+public:
+    /// \brief String Constructor
+    ///
+    /// Constructs an IP Check object from an address or address prefix in the
+    /// form "<ip-address>/n".
+    ///
+    /// Also allowed are the special keywords "any4" and "any6", which match
+    /// any IPv4 or IPv6 address.  These must be specified in lowercase.
+    ///
+    /// \param ipprefix IP address prefix in the form "<ip-address>/n"
+    ///        (where the "/n" part is optional and should be valid for the
+    ///        address).  If "n" is specified as zero, the match is for any
+    ///        address in that address family.  The address can also be
+    ///        given as "any4" or "any6".
+    IPCheck(const std::string& ipprefix) : family_(0) {
+
+        // Ensure array elements are correctly initialized with zeroes.
+        std::fill(address_, address_ + IPV6_SIZE, 0);
+        std::fill(mask_, mask_ + IPV6_SIZE, 0);
+
+        // Only deal with the string after we've removed leading and trailing
+        // spaces.
+        const std::string mod_prefix = isc::util::str::trim(ipprefix);
+
+        // Check for special cases first.
+        if (mod_prefix == "any4") {
+            family_ = AF_INET;
+
+        } else if (mod_prefix == "any6") {
+            family_ = AF_INET6;
+
+        } else {
+
+            // General address prefix.  Split into address part and prefix
+            // length.
+            const std::pair<std::string, int> result =
+                internal::splitIPAddress(mod_prefix);
+
+            // Try to convert the address.  If successful, the result is in
+            // network-byte order (most significant components at lower
+            // addresses).
+            int status = inet_pton(AF_INET6, result.first.c_str(), address_);
+            if (status == 1) {
+                // It was an IPv6 address.
+                family_ = AF_INET6;
+            } else {
+                // IPv6 interpretation failed, try IPv4.
+                status = inet_pton(AF_INET, result.first.c_str(), address_);
+                if (status == 1) {
+                    family_ = AF_INET;
+                }
+            }
+
+            // Handle errors.
+            if (status == 0) {
+                isc_throw(isc::InvalidParameter, "address prefix of " <<
+                          ipprefix << " is not valid");
+            } else if (status < 0) {
+                isc_throw(isc::Unexpected, "address conversion of " <<
+                          ipprefix << " failed due to a system error");
+            }
+
+            // All done, so set the mask used in the address comparison.
+            setMask(result.second);
+        }
+    }
+
+    /// \brief Destructor
+    virtual ~IPCheck() {}
+
+    /// \brief The check itself
+    ///
+    /// Matches the passed argument to the condition stored here.  Different
+    /// specialisations must be provided for different argument types, and the
+    /// program will fail to link if a required specialisation is not
+    /// provided.
+    ///
+    /// It is expected that matches() will extract the address information from
+    /// the Context structure, and use compare() to actually perform the
+    /// comparison.
+    ///
+    /// \param context Information to be matched
+    virtual bool matches(const Context& context) const;
+
+    /// \brief Estimated cost
+    ///
+    /// Assume that the cost of the match is linear and depends on the
+    /// maximum number of comparison operations.
+    ///
+    /// \return Estimated cost of the comparison
+    virtual unsigned cost() const {
+        return ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+    }
+
+    ///@{
+    /// Access methods - mainly for testing
+
+    /// \return Stored IP address
+    std::vector<uint8_t> getAddress() const {
+        const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+        return (std::vector<uint8_t>(address_, address_ + vector_len));
+    }
+
+    /// \return Network mask applied to match
+    std::vector<uint8_t> getMask() const {
+        const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+        return (std::vector<uint8_t>(mask_, mask_ + vector_len));
+    }
+
+    /// \return Prefix length of the match
+    size_t getPrefixlen() const {
+        // Work this out by counting bits in the mask.
+        size_t count = 0;
+        for (size_t i = 0; i < IPV6_SIZE; ++i) {
+            if (mask_[i] == 0xff) {
+                // All bits set in this byte
+                count += 8;
+                continue;
+
+            } else if (mask_[i] != 0) {
+                // Only some bits set in this byte.  Count them.
+                uint8_t byte = mask_[i];
+                for (int j = 0; j < 8; ++j) {
+                    count += byte & 0x01;   // Add one if the bit is set
+                    byte >>= 1;             // Go for next bit
+                }
+            }
+            break;
+        }
+        return (count);
+    }
+
+    /// \return Address family
+    int getFamily() const {
+        return (family_);
+    }
+    ///@}
+
+protected:
+    /// \brief Comparison
+    ///
+    /// This is the actual comparison function that compares the IP address
+    /// passed to this method against the matching information held in the
+    /// class.  It is expected to be called from matches().
+    ///
+    /// \param testaddr Address (in network byte order) to test against the
+    ///                 check condition in the class.  This is expected to
+    ///                 be IPV6_SIZE or IPV4_SIZE bytes long.
+    /// \param family   Address family of testaddr.
+    ///
+    /// \return true if the address matches, false if it does not.
+    virtual bool compare(const uint8_t* testaddr, int family) const {
+
+        if (family != family_) {
+            // Can't match if the address is of the wrong family
+            return (false);
+        }
+
+        // The address family matches, so we have to do a full comparison.  To
+        // check that the address given matches the stored network address and
+        // mask, we check the simple condition that:
+        //
+        //     address_given & mask_ == stored_address & mask_
+        //
+        // The result is checked for all bytes for which there are bits set in
+        // the mask.  We stop at the first non-match (or when we run out of bits
+        // in the mask).
+        //
+        // Note that the mask represents a contiguous set of bits.  As such, as
+        // soon as we find a mask byte of zeroes, we have run past the part of
+        // the address where we need to match.
+        //
+        // Note also that when checking an IPv4 address, the constructor has
+        // set all bytes in the mask beyond the first four bytes to zero.
+        // As the loop stops when it encounters a zero mask byte, if the
+        // ACL is for an IPV4 address, the loop will never check more than four
+        // bytes.
+
+        bool match = true;
+        for (size_t i = 0; match && (i < IPV6_SIZE) && (mask_[i] != 0); ++i) {
+            match = ((testaddr[i] & mask_[i]) == (address_[i] & mask_[i]));
+        }
+        return (match);
+    }
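+
+    // Worked example (illustrative): for an ACL of "192.0.2.0/24" the stored
+    // mask is ff.ff.ff.00 followed by zero bytes.  Testing 192.0.2.45:
+    //
+    //     192.0.2.45 & ff.ff.ff.00 == 192.0.2.0
+    //     192.0.2.0  & ff.ff.ff.00 == 192.0.2.0   -> bytes agree, so match
+    //
+    // whereas 192.0.3.45 & ff.ff.ff.00 == 192.0.3.0, which differs from the
+    // stored address in the third byte, so the loop reports no match.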
+
+private:
+    /// \brief Set Mask
+    ///
+    /// Sets up the mask from the prefix length.  This involves setting
+    /// an individual mask in each byte of the mask array.
+    ///
+    /// The actual allowed value of the prefix length depends on the address
+    /// family.
+    ///
+    /// \param requested Requested prefix length size.  If negative, the
+    ///        maximum for the address family is assumed.  (A negative value
+    ///        will arise if the string constructor was used and no mask size
+    ///        was given.)
+    void setMask(int requested) {
+
+        // Set the maximum number of bits allowed in the mask, and request
+        // that number of bits if no prefix length was given in the constructor.
+        const int maxmask = 8 * ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+        if (requested < 0) {
+            requested = maxmask;
+        }
+
+        // Check that the requested mask size is valid.
+        if (requested <= maxmask) {
+
+            // Loop, setting the bits in the set of mask bytes until all the
+            // specified bits have been used up.  As both IPv4 and IPv6
+            // addresses are stored in network-byte order, this works in
+            // both cases.
+            size_t bits_left = requested;   // Bits remaining to set
+            int i = -1;
+            while (bits_left > 0) {
+                if (bits_left >= 8) {
+                    mask_[++i] = ~0;  // All bits set
+                    bits_left -= 8;
+
+                } else if (bits_left > 0) {
+                    mask_[++i] = internal::createMask(bits_left);
+                    bits_left = 0;
+                }
+            }
+        } else {
+            isc_throw(isc::OutOfRange,
+                      "mask size of " << requested << " is invalid " <<
+                      "for the given address family");
+        }
+    }
+
+    // Member variables.
+    uint8_t address_[IPV6_SIZE];  ///< Address in binary form
+    uint8_t mask_[IPV6_SIZE];     ///< Address mask
+    int     family_;              ///< Address family
+};
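+
+// Usage sketch (illustrative only - the context type and its members below
+// are assumptions, not part of this header).  A user of IPCheck must supply
+// a matches() specialisation for the chosen context type, along these lines:
+//
+//     struct MyContext {
+//         const uint8_t* addr;    // address in network byte order
+//         int family;             // AF_INET or AF_INET6
+//     };
+//
+//     namespace isc {
+//     namespace acl {
+//     template <>
+//     bool IPCheck<MyContext>::matches(const MyContext& ctx) const {
+//         return (compare(ctx.addr, ctx.family));
+//     }
+//     }
+//     }
+//
+// With that in place, IPCheck<MyContext>("192.0.2.0/24").matches(ctx) returns
+// true for any IPv4 address in 192.0.2.0/24.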
+
+// Some compilers seem to need this to be explicitly defined outside the class
+template <typename Context>
+const size_t IPCheck<Context>::IPV6_SIZE;
+
+template <typename Context>
+const size_t IPCheck<Context>::IPV4_SIZE;
+
+} // namespace acl
+} // namespace isc
+
+#endif // __IP_CHECK_H
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
index 11e7ebc..c3400cb 100644
--- a/src/lib/acl/loader.h
+++ b/src/lib/acl/loader.h
@@ -24,6 +24,10 @@
 namespace isc {
 namespace acl {
 
+class AnyOfSpec;
+class AllOfSpec;
+template<typename Mode, typename Context> class LogicOperator;
+
 /**
  * \brief Exception for bad ACL specifications.
  *
@@ -263,7 +267,7 @@ public:
      * \param description The JSON description of the check.
      */
     boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
-                                                 description)
+                                                 description) const
     {
         // Get the description as a map
         typedef std::map<std::string, data::ConstElementPtr> Map;
@@ -290,7 +294,7 @@ public:
      * \param description The JSON list of ACL.
      */
     boost::shared_ptr<ACL<Context, Action> > load(const data::ConstElementPtr&
-                                                  description)
+                                                  description) const
     {
         // We first check it's a list, so we can use the list reference
         // (the list may be huge)
@@ -346,7 +350,7 @@ private:
      *     the map.
      */
     boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
-                                                 description, Map& map)
+                                                 description, Map& map) const
     {
         // Remove the action keyword
         map.erase("action");
@@ -367,18 +371,45 @@ private:
                 }
                 if (creatorIt->second->allowListAbbreviation() &&
                     checkDesc->second->getType() == data::Element::list) {
-                    isc_throw_1(LoaderError,
-                                "Not implemented (OR-abbreviated form)",
-                                checkDesc->second);
+                    // Or-abbreviated form - create an OR and put everything
+                    // inside.
+                    const std::vector<data::ConstElementPtr>&
+                        params(checkDesc->second->listValue());
+                    boost::shared_ptr<LogicOperator<AnyOfSpec, Context> >
+                        oper(new LogicOperator<AnyOfSpec, Context>);
+                    for (std::vector<data::ConstElementPtr>::const_iterator
+                             i(params.begin());
+                         i != params.end(); ++i) {
+                        oper->addSubexpression(
+                            creatorIt->second->create(name, *i, *this));
+                    }
+                    return (oper);
                 }
                 // Create the check and return it
                 return (creatorIt->second->create(name, checkDesc->second,
                                                   *this));
             }
-            default:
-                isc_throw_1(LoaderError,
-                            "Not implemented (AND-abbreviated form)",
-                            description);
+            default: {
+                // This is the AND-abbreviated form. We need to create an
+                // AND (or "ALL") operator, loop through the whole map and
+                // fill it in. We use a small trick - we create a bunch of
+                // single-item maps, call this loader recursively (so it
+                // gets into the "case 1" branch, where the actual loading
+                // happens) and use the results to fill the operator.
+                //
+                // We keep the description the same; there's nothing we could
+                // take out (we could create a new one, but that would be
+                // confusing, as it is used for error messages only).
+                boost::shared_ptr<LogicOperator<AllOfSpec, Context> >
+                    oper(new LogicOperator<AllOfSpec, Context>);
+                for (Map::const_iterator i(map.begin()); i != map.end(); ++i) {
+                    Map singleSubexpr;
+                    singleSubexpr.insert(*i);
+                    oper->addSubexpression(loadCheck(description,
+                                                     singleSubexpr));
+                }
+                return (oper);
+            }
         }
     }
     /**
@@ -401,4 +432,17 @@ private:
 }
 }
 
+/*
+ * This include at the end of the file is unusual, but it is needed because we
+ * use template classes from logic_check.h. They only have to be visible at
+ * the point where our class is instantiated, which happens below this header.
+ *
+ * The problem is that logic_check.h includes this header as well, so there is
+ * a circular dependency. If we included it at the beginning and someone
+ * included this header first, logic_check.h would not see our definitions.
+ * This way, no matter in which order the headers are included, the
+ * definitions from this header end up above the ones from logic_check.h.
+ */
+#include "logic_check.h"
+
 #endif
diff --git a/src/lib/acl/logic_check.h b/src/lib/acl/logic_check.h
new file mode 100644
index 0000000..6e1c567
--- /dev/null
+++ b/src/lib/acl/logic_check.h
@@ -0,0 +1,206 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOGIC_CHECK_H
+#define ACL_LOGIC_CHECK_H
+
+#include "check.h"
+#include "loader.h"
+
+namespace isc {
+namespace acl {
+
+/// \brief Constants for the AnyOf implementation
+class AnyOfSpec {
+public:
+    static bool start() { return (false); }
+    static bool terminate(const bool another) {
+        return (another);
+    }
+};
+
+/// \brief Constants for the AllOf implementation
+class AllOfSpec {
+public:
+    static bool start() { return (true); }
+    static bool terminate(const bool another) {
+        return (!another);
+    }
+};
+
+/**
+ * \brief Logic operators
+ *
+ * This class implements the AllOf and AnyOf compound checks. As their
+ * behaviour is almost the same, the same template class is used. Which
+ * one it is depends on the Mode template parameter. The Mode should be
+ * one of AnyOfSpec or AllOfSpec, which provide helper functions for the
+ * internal implementation. It would be nice to provide typedefs for
+ * them, but it is impossible to do so, as we have the Context template
+ * parameter as well and C++ doesn't like templated typedefs.
+ *
+ * The object holds several subexpressions and returns true if all
+ * of the subexpressions return true (in case of AllOfSpec Mode) or
+ * at least one of them returns true (in case of AnyOfSpec Mode). If
+ * some subexpression guarantees the result (e.g. one returns false
+ * in case of AllOfSpec), the remaining ones are not evaluated, for
+ * performance reasons.
+ */
+template<typename Mode, typename Context>
+class LogicOperator : public CompoundCheck<Context> {
+public:
+    /**
+     * \brief Add another subexpression.
+     *
+     * This adds another subexpression to the list of checked expressions.
+     * This is usually done shortly after the creation, before using the
+     * check for matches.
+     *
+     * Currently there's no way to insert an expression at an arbitrary
+     * position or to remove one. That might turn out to be needed in future
+     * for optimisation, or it might even turn out we need shared pointers
+     * for it.
+     *
+     * \param expr The new expression to put inside.
+     */
+    void addSubexpression(const boost::shared_ptr<Check<Context> >& expr) {
+        checks_.push_back(expr);
+    }
+    /**
+     * \brief The current list of subexpressions.
+     */
+    virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+        typename CompoundCheck<Context>::Checks result;
+        for (typename Checks::const_iterator i(checks_.begin());
+             i != checks_.end(); ++i) {
+            result.push_back(i->get());
+        }
+        return (result);
+    }
+    /**
+     * \brief The match of the check.
+     *
+     * Runs the subexpressions, one by one, and then decides based on that
+     * what to return.
+     */
+    virtual bool matches(const Context& context) const {
+        /*
+         * This might look slightly complicated. However, it is just a
+         * generalized version of a multi-way AND or OR. The usual
+         * implementation of a multi-way AND starts with true; once a false
+         * operand is found, the result is false forever and false is
+         * returned. It is exactly the other way around with OR.
+         *
+         * So, if we ever find a subexpression whose result is the opposite
+         * of the start value (false in case of AND, true in case of OR), we
+         * can stop and return that value right away. If no such
+         * subexpression is found, we get to the end and return the default
+         * (the start value).
+         */
+        for (typename Checks::const_iterator i(checks_.begin());
+             i != checks_.end(); ++i) {
+            if (Mode::terminate((*i)->matches(context))) {
+                return (!Mode::start());
+            }
+        }
+        return (Mode::start());
+    }
+private:
+    /// \brief List of subexpressions
+    typedef typename std::vector<boost::shared_ptr<Check<Context> > > Checks;
+    Checks checks_;
+};
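+
+// Usage sketch (illustrative only - "MyContext" and the check pointers below
+// are assumptions).  An operator is normally assembled by the loader, but it
+// can also be built by hand:
+//
+//     boost::shared_ptr<LogicOperator<AnyOfSpec, MyContext> >
+//         any(new LogicOperator<AnyOfSpec, MyContext>);
+//     any->addSubexpression(check1);  // boost::shared_ptr<Check<MyContext> >
+//     any->addSubexpression(check2);
+//     // any->matches(ctx) is true if at least one subexpression matches;
+//     // with AllOfSpec instead, it is true only if all of them match.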
+
+/**
+ * \brief Creator for the LogicOperator compound check.
+ *
+ * This class can load the ANY and ALL operators from JSON. They expect
+ * a list of subexpressions as a parameter, e.g. like this:
+ *
+ * \verbatim
+ * {"ANY": [
+ *    {"ip": "1.2.3.4"},
+ *    {"ip": "5.6.7.8"}
+ * ]}
+ * \endverbatim
+ *
+ * It uses the loader to load the subexpressions, therefore whatever is
+ * supported there is supported here as well.
+ *
+ * The Mode template parameter has the same meaning as with LogicOperator;
+ * it is used to decide which operator to create.
+ */
+template<typename Mode, typename Context, typename Action = BasicAction>
+class LogicCreator : public Loader<Context, Action>::CheckCreator {
+public:
+    /**
+     * \brief Constructor.
+     *
+     * \param name The name for which the creator will work. In practice,
+     *     it will usually be ANY or ALL (depending on the mode), but
+     *     anything else can be used as well.
+     */
+    LogicCreator(const std::string& name) :
+        name_(name)
+    {}
+    /// \brief Returns vector containing the name.
+    virtual std::vector<std::string> names() const {
+        std::vector<std::string> result;
+        result.push_back(name_);
+        return (result);
+    }
+    /**
+     * \brief Converts a JSON description into the logic operator.
+     *
+     * This is the place where the actual loading happens. It creates
+     * the logic operator and calls the loader on each of the list
+     * elements, placing the result into the logic operator.
+     *
+     * The first parameter is ignored; it is there only to match the interface.
+     *
+     * \param definition The JSON definition of the subexpressions. This must
+     *     be a list (if it isn't, a LoaderError is thrown) and the elements
+     *     must be loadable by the loader (the exceptions from it are not
+     *     caught).
+     * \param loader The loader to use for loading of subexpressions.
+     */
+    virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+                                                      data::ConstElementPtr
+                                                      definition,
+                                                      const Loader<Context,
+                                                      Action>& loader)
+    {
+        std::vector<data::ConstElementPtr> subexprs;
+        try {
+            subexprs = definition->listValue();
+        }
+        catch (const data::TypeError&) {
+            isc_throw_1(LoaderError, "Logic operator takes list", definition);
+        }
+        boost::shared_ptr<LogicOperator<Mode, Context> >
+            result(new LogicOperator<Mode, Context>);
+        for (std::vector<data::ConstElementPtr>::const_iterator
+                 i(subexprs.begin());
+             i != subexprs.end(); ++i) {
+            result->addSubexpression(loader.loadCheck(*i));
+        }
+        return (result);
+    }
+    virtual bool allowListAbbreviation() const { return (false); }
+private:
+    const std::string name_;
+};
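+
+// Usage sketch (illustrative only): assuming an existing Loader<MyContext>
+// instance named "loader", and that registerCreator() takes a shared pointer
+// to a CheckCreator (as the accompanying unit tests suggest), the ANY and ALL
+// operators can be made available to JSON ACLs like this:
+//
+//     loader.registerCreator(boost::shared_ptr<Loader<MyContext>::CheckCreator>(
+//         new LogicCreator<AnyOfSpec, MyContext>("ANY")));
+//     loader.registerCreator(boost::shared_ptr<Loader<MyContext>::CheckCreator>(
+//         new LogicCreator<AllOfSpec, MyContext>("ALL")));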
+
+}
+}
+
+#endif
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index 9328c68..ae137a8 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -5,16 +5,22 @@ TESTS =
 if HAVE_GTEST
 TESTS += run_unittests
 run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += check_test.cc acl_test.cc loader_test.cc
-run_unittests_SOURCES += logcheck.h
+run_unittests_SOURCES += acl_test.cc
+run_unittests_SOURCES += check_test.cc
 run_unittests_SOURCES += dns_test.cc
+run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += loader_test.cc
+run_unittests_SOURCES += logcheck.h
+run_unittests_SOURCES += logic_check_test.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 
 run_unittests_LDADD = $(GTEST_LDADD)
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
 endif
diff --git a/src/lib/acl/tests/creators.h b/src/lib/acl/tests/creators.h
new file mode 100644
index 0000000..85f3444
--- /dev/null
+++ b/src/lib/acl/tests/creators.h
@@ -0,0 +1,154 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is not a public header; it contains code shared between tests.
+// It provides various creators used to test the loader and other creators.
+
+#ifndef CREATORS_H
+#define CREATORS_H
+
+#include "logcheck.h"
+#include <acl/loader.h>
+#include <string>
+
+using isc::data::ConstElementPtr;
+using namespace std;
+using namespace boost;
+
+namespace {
+
+// Just for convenience, create JSON objects from JSON string
+ConstElementPtr el(const string& JSON) {
+    return (isc::data::Element::fromJSON(JSON));
+}
+
+// A check that doesn't check anything but remembers its own name
+// and data.
+class NamedCheck : public Check<Log> {
+public:
+    NamedCheck(const string& name, ConstElementPtr data) :
+        name_(name),
+        data_(data)
+    {}
+    virtual bool matches(const Log&) const { return (true); }
+    const string name_;
+    const ConstElementPtr data_;
+};
+
+// The creator of NamedCheck
+class NamedCreator : public Loader<Log>::CheckCreator {
+public:
+    NamedCreator(const string& name, bool abbreviatedList = true) :
+        abbreviated_list_(abbreviatedList)
+    {
+        names_.push_back(name);
+    }
+    NamedCreator(const vector<string>& names) :
+        names_(names),
+        abbreviated_list_(true)
+    {}
+    vector<string> names() const {
+        return (names_);
+    }
+    shared_ptr<Check<Log> > create(const string& name, ConstElementPtr data,
+                                   const Loader<Log>&)
+    {
+        bool found(false);
+        for (vector<string>::const_iterator i(names_.begin());
+             i != names_.end(); ++i) {
+            if (*i == name) {
+                found = true;
+                break;
+            }
+        }
+        EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
+            "doesn't handle it.";
+        return (shared_ptr<Check<Log> >(new NamedCheck(name, data)));
+    }
+    bool allowListAbbreviation() const {
+        return (abbreviated_list_);
+    }
+private:
+    vector<string> names_;
+    const bool abbreviated_list_;
+};
+
+// To be thrown in tests internally
+class TestCreatorError {};
+
+// This will throw every time it should create something
+class ThrowCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("throw");
+        return (result);
+    }
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
+                                   const Loader<Log>&)
+    {
+        throw TestCreatorError();
+    }
+};
+
+// This throws whenever the match is called on it
+class ThrowCheck : public Check<Log> {
+public:
+    virtual bool matches(const Log&) const {
+        throw TestCreatorError();
+    }
+};
+
+// And creator for it
+class ThrowCheckCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("throwcheck");
+        return (result);
+    }
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
+                                   const Loader<Log>&)
+    {
+        return (shared_ptr<Check<Log> >(new ThrowCheck()));
+    }
+};
+
+class LogCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("logcheck");
+        return (result);
+    }
+    /*
+     * For simplicity, we just take two values as a list: the first is the
+     * logging cell used, the second is the result of the check. No error
+     * checking is done; if there's a bug in the test, it will throw
+     * TypeError for us.
+     */
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr definition,
+                                   const Loader<Log>&)
+    {
+        vector<ConstElementPtr> list(definition->listValue());
+        int logpos(list[0]->intValue());
+        bool accept(list[1]->boolValue());
+        return (shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
+    }
+    // We take a list, so don't interpret it for us
+    virtual bool allowListAbbreviation() const { return (false); }
+};
+
+}
+
+#endif
diff --git a/src/lib/acl/tests/ip_check_unittest.cc b/src/lib/acl/tests/ip_check_unittest.cc
new file mode 100644
index 0000000..3fcb05b
--- /dev/null
+++ b/src/lib/acl/tests/ip_check_unittest.cc
@@ -0,0 +1,588 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+#include <acl/ip_check.h>
+
+using namespace isc::acl;
+using namespace isc::acl::internal;
+using namespace std;
+
+namespace {
+const size_t IPV4_SIZE = 4;
+const size_t IPV6_SIZE = 16;
+
+// Simple struct holding either an IPV4 or IPV6 address.  This is the "Context"
+// used for the tests.
+//
+// The structure is also used for converting an IPV4 address to a four-byte
+// array.
+struct GeneralAddress {
+    int             family;     // Family of the address
+    vector<uint8_t> addr;       // Address data.  Size indicates what it holds
+
+    // Convert uint32_t address in host-byte order to a uint8_t vector in
+    // network-byte order.
+    vector<uint8_t> convertUint32(uint32_t address) {
+        BOOST_STATIC_ASSERT(sizeof(uint32_t) == IPV4_SIZE);
+
+        vector<uint8_t> result(IPV4_SIZE);
+
+        // Copy the address to the array in network-byte order: the most
+        // significant byte goes at the lowest index.
+        result[3] = address & 0xff;
+        result[2] = (address >> 8) & 0xff;
+        result[1] = (address >> 16) & 0xff;
+        result[0] = (address >> 24) & 0xff;
+
+        return (result);
+    }
+
+    // Convenience constructor for V4 address.  As it is not marked as explicit,
+    // it allows the automatic promotion of a uint32_t to a GeneralAddress data
+    // type in calls to matches().
+    GeneralAddress(uint32_t address) : family(AF_INET), addr()
+    {
+        addr = convertUint32(address);
+    }
+
+    // Convenience constructor for V6 address.  As it is not marked as explicit,
+    // it allows the automatic promotion of a vector<uint8_t> to a
+    // GeneralAddress data type in calls to matches().
+    GeneralAddress(const vector<uint8_t>& address) : family(AF_INET6),
+                                                     addr(address)
+    {
+        if (address.size() != IPV6_SIZE) {
+            isc_throw(isc::InvalidParameter, "vector passed to GeneralAddress "
+                      "constructor is " << address.size() << " bytes long - it "
+                      "should be " << IPV6_SIZE << " bytes instead");
+        }
+    }
+
+    // A couple of convenience methods for checking equality with different
+    // representations of an address.
+
+    // Check that the IPV4 address is the same as that given.
+    bool equals(uint32_t address) {
+        if (family == AF_INET) {
+            const vector<uint8_t> byte_address = convertUint32(address);
+            return (equal(byte_address.begin(), byte_address.end(),
+                           addr.begin()));
+        }
+        return (false);
+    }
+
+    // Check that the array is equal to that given.
+    bool equals(const vector<uint8_t>& byte_address) {
+        if (addr.size() == byte_address.size()) {
+            return (equal(byte_address.begin(), byte_address.end(),
+                           addr.begin()));
+        }
+        return (false);
+    }
+};
+} // Unnamed namespace
+
+// Provide a specialisation of the IPCheck::matches() method for the
+// GeneralAddress class.
+
+namespace isc  {
+namespace acl {
+template <>
+bool IPCheck<GeneralAddress>::matches(const GeneralAddress& address) const {
+    return (compare(&address.addr[0], address.family));
+}
+} // namespace acl
+} // namespace isc
+
+namespace {
+/// *** Free Function Tests ***
+
+// Test the createMask() function.
+TEST(IPFunctionCheck, CreateMask) {
+
+    // Invalid arguments should throw.
+    EXPECT_THROW(createMask(9), isc::OutOfRange);
+
+    // Check on all possible 8-bit values.
+    uint16_t expected = 0xff00;
+    for (size_t i = 0; i <= 8; ++i, expected >>= 1) {
+        EXPECT_EQ(static_cast<uint8_t>(expected & 0xff), createMask(i));
+    }
+}
+
+// Test the splitIPAddress() function.
+TEST(IPFunctionCheck, SplitIPAddress) {
+    pair<string, uint32_t> result;
+
+    result = splitIPAddress("192.0.2.1");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(-1, result.second);
+
+    result = splitIPAddress("192.0.2.1/24");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(24, result.second);
+
+    result = splitIPAddress("2001:db8::/128");
+    EXPECT_EQ(string("2001:db8::"), result.first);
+    EXPECT_EQ(128, result.second);
+
+    result = splitIPAddress("192.0.2.1/0");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(0, result.second);
+
+    EXPECT_THROW(splitIPAddress("192.0.2.43/27 "), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43/-1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43//1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43/1/"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("/192.0.2.43/1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("2001:db8::/xxxx"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("2001:db8::/32/s"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("1/"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("/1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress(" 1/ "), isc::InvalidParameter);
+}
+
+// *** IPv4 Tests ***
+
+TEST(IPCheck, V4StringConstructor) {
+
+    // Constructor with no prefix length given (32 is assumed).
+    IPCheck<GeneralAddress> acl1("192.0.2.255");
+    EXPECT_EQ(32, acl1.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl1.getFamily());
+
+    vector<uint8_t> stored1 = acl1.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored1.size());
+    GeneralAddress expected1(0xc00002ff);
+    EXPECT_TRUE(expected1.equals(stored1));
+
+    // Constructor with valid mask given
+    IPCheck<GeneralAddress> acl2("192.0.2.0/24");
+    EXPECT_EQ(24, acl2.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl2.getFamily());
+
+    vector<uint8_t> stored2 = acl2.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored2.size());
+    GeneralAddress expected2(0xc0000200);
+    EXPECT_TRUE(expected2.equals(stored2));
+
+    // More valid masks
+    IPCheck<GeneralAddress> acl3("192.0.2.1/0");
+    EXPECT_EQ(0, acl3.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl3.getFamily());
+
+    vector<uint8_t> stored3 = acl3.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored3.size());
+    GeneralAddress expected3(0xc0000201);
+    EXPECT_TRUE(expected3.equals(stored3));
+
+    IPCheck<GeneralAddress> acl4("192.0.2.2/32");
+    EXPECT_EQ(32, acl4.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl4.getFamily());
+
+    vector<uint8_t> stored4 = acl4.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored4.size());
+    GeneralAddress expected4(0xc0000202);
+    EXPECT_TRUE(expected4.equals(stored4));
+
+    // Any match
+    IPCheck<GeneralAddress> acl5("any4");
+    EXPECT_EQ(0, acl5.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl5.getFamily());
+
+    vector<uint8_t> stored5 = acl5.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored5.size());
+    GeneralAddress expected5(0);
+    EXPECT_TRUE(expected5.equals(stored5));
+
+    // Invalid prefix lengths
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/33"), isc::OutOfRange);
+
+    // ... and invalid strings
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/-1"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/24/3"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/ww"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("aa.255.255.0/ww"),
+                 isc::InvalidParameter);
+}
+
+TEST(IPCheck, V4CopyConstructor) {
+    IPCheck<GeneralAddress> acl1("192.0.2.1/24");
+    IPCheck<GeneralAddress> acl2(acl1);
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+    EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+    vector<uint8_t> net1 = acl1.getMask();
+    vector<uint8_t> net2 = acl2.getMask();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+    net1 = acl1.getAddress();
+    net2 = acl2.getAddress();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+TEST(IPCheck, V4AssignmentOperator) {
+    IPCheck<GeneralAddress> acl1("192.0.2.0/24");
+    IPCheck<GeneralAddress> acl2("192.0.2.128/25");
+    acl2 = acl1;
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+    EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+    vector<uint8_t> net1 = acl1.getMask();
+    vector<uint8_t> net2 = acl2.getMask();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+    net1 = acl1.getAddress();
+    net2 = acl2.getAddress();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+// Check that the comparison works - note that "matches" just calls the
+// internal compare() code. (Also note that the argument to matches() will be
+// automatically converted to the GeneralAddress data type used for the tests
+// because of its constructor taking a uint32_t argument.)
+
+TEST(IPCheck, V4Compare) {
+    // Exact address - match if given address matches stored address.
+    IPCheck<GeneralAddress> acl1("192.0.2.255/32");
+    EXPECT_TRUE(acl1.matches(0xc00002ff));
+    EXPECT_FALSE(acl1.matches(0xc00002fe));
+    EXPECT_FALSE(acl1.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl2("192.0.2.255/27");
+    EXPECT_TRUE(acl2.matches(0xc00002ff));
+    EXPECT_TRUE(acl2.matches(0xc00002fe));
+    EXPECT_TRUE(acl2.matches(0xc00002ee));
+    EXPECT_FALSE(acl2.matches(0xc00002de));
+    EXPECT_FALSE(acl2.matches(0xd00002fe));
+    EXPECT_FALSE(acl2.matches(0x13457f13));
+
+    // Match if "any4" is specified
+    IPCheck<GeneralAddress> acl3("any4");
+    EXPECT_TRUE(acl3.matches(0xc00002ff));
+    EXPECT_TRUE(acl3.matches(0xc00002fe));
+    EXPECT_TRUE(acl3.matches(0xc00002ee));
+    EXPECT_TRUE(acl3.matches(0xc00002de));
+    EXPECT_TRUE(acl3.matches(0xd00002fe));
+    EXPECT_TRUE(acl3.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl4("0.0.0.0/0");
+    EXPECT_TRUE(acl4.matches(0xc00002ff));
+    EXPECT_TRUE(acl4.matches(0xc00002fe));
+    EXPECT_TRUE(acl4.matches(0xc00002ee));
+    EXPECT_TRUE(acl4.matches(0xc00002de));
+    EXPECT_TRUE(acl4.matches(0xd00002fe));
+    EXPECT_TRUE(acl4.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl5("192.0.2.255/0");
+    EXPECT_TRUE(acl5.matches(0xc00002ff));
+    EXPECT_TRUE(acl5.matches(0xc00002fe));
+    EXPECT_TRUE(acl5.matches(0xc00002ee));
+    EXPECT_TRUE(acl5.matches(0xc00002de));
+    EXPECT_TRUE(acl5.matches(0xd00002fe));
+    EXPECT_TRUE(acl5.matches(0x13457f13));
+}
+
+// *** IPV6 Tests ***
+
+// Some constants used in the tests
+
+const char* V6ADDR_1_STRING = "2001:0db8:1122:3344:5566:7788:99aa:bbcc";
+const uint8_t V6ADDR_1[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x11, 0x22, 0x33, 0x44,
+    0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc
+};
+
+const char* V6ADDR_2_STRING = "2001:0db8::dead:beef";
+const uint8_t V6ADDR_2[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 48 bits
+const uint8_t V6ADDR_2_48[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0xff, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 49 bits
+const uint8_t V6ADDR_2_49[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x7f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 50 bits
+const uint8_t V6ADDR_2_50[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x3f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 51 bits
+const uint8_t V6ADDR_2_51[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x1f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 52 bits
+const uint8_t V6ADDR_2_52[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x0f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 127 bits
+const uint8_t V6ADDR_2_127[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xee
+};
+
+const uint8_t V6ADDR_3[] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+const uint8_t V6ADDR_4[] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+TEST(IPCheck, V6StringConstructor) {
+    IPCheck<GeneralAddress> acl1(V6ADDR_1_STRING);
+    vector<uint8_t> address = acl1.getAddress();
+
+    EXPECT_EQ(128, acl1.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl1.getFamily());
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_1));
+
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/51"));
+    address = acl2.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(51, acl2.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl2.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+    IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/127"));
+    address = acl3.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(127, acl3.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl3.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+    IPCheck<GeneralAddress> acl4("::1");
+    address = acl4.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(128, acl4.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl4.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_3));
+
+    // Any match.  In these cases, the stored address should be all zeroes.
+    IPCheck<GeneralAddress> acl5("any6");
+    address = acl5.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(0, acl5.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl5.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+    IPCheck<GeneralAddress> acl6("::/0");
+    address = acl6.getAddress();
+    EXPECT_EQ(0, acl6.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl6.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+    // Some invalid strings
+    EXPECT_THROW(IPCheck<GeneralAddress>("::1/129"), isc::OutOfRange);
+    EXPECT_THROW(IPCheck<GeneralAddress>("::1/24/3"), isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>(":::1/24"), isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("2001:0db8::abcd/ww"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("2xx1:0db8::abcd/32"),
+                 isc::InvalidParameter);
+}
+
+TEST(IPCheck, V6CopyConstructor) {
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+    IPCheck<GeneralAddress> acl2(acl1);
+
+    vector<uint8_t> acl1_address = acl1.getAddress();
+    vector<uint8_t> acl2_address = acl2.getAddress();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+    EXPECT_EQ(acl1_address.size(), acl2_address.size());
+    EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+                acl2_address.begin()));
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+    vector<uint8_t> acl1_mask = acl1.getMask();
+    vector<uint8_t> acl2_mask = acl2.getMask();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+    EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+    EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+                acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6AssignmentOperator) {
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_1_STRING) + string("/48"));
+
+    acl2 = acl1;
+
+    vector<uint8_t> acl1_address = acl1.getAddress();
+    vector<uint8_t> acl2_address = acl2.getAddress();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+    EXPECT_EQ(acl1_address.size(), acl2_address.size());
+    EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+                acl2_address.begin()));
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+    vector<uint8_t> acl1_mask = acl1.getMask();
+    vector<uint8_t> acl2_mask = acl2.getMask();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+    EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+    EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+                acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6Compare) {
+    // Set up some data.
+    vector<uint8_t> v6addr_2(V6ADDR_2, V6ADDR_2 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_48(V6ADDR_2_48, V6ADDR_2_48 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_49(V6ADDR_2_49, V6ADDR_2_49 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_50(V6ADDR_2_50, V6ADDR_2_50 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_51(V6ADDR_2_51, V6ADDR_2_51 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_52(V6ADDR_2_52, V6ADDR_2_52 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_127(V6ADDR_2_127, V6ADDR_2_127 + IPV6_SIZE);
+    vector<uint8_t> v6addr_3(V6ADDR_3, V6ADDR_3 + IPV6_SIZE);
+
+    // Exact address - match if given address matches stored address.
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/128"));
+    EXPECT_TRUE(acl1.matches(v6addr_2));
+    EXPECT_FALSE(acl1.matches(v6addr_2_127));
+    EXPECT_FALSE(acl1.matches(v6addr_2_52));
+    EXPECT_FALSE(acl1.matches(v6addr_2_51));
+    EXPECT_FALSE(acl1.matches(v6addr_2_50));
+    EXPECT_FALSE(acl1.matches(v6addr_2_49));
+    EXPECT_FALSE(acl1.matches(v6addr_2_48));
+    EXPECT_FALSE(acl1.matches(v6addr_3));
+
+    // Match to various prefixes.
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/127"));
+    EXPECT_TRUE(acl2.matches(v6addr_2));
+    EXPECT_TRUE(acl2.matches(v6addr_2_127));
+    EXPECT_FALSE(acl2.matches(v6addr_2_52));
+    EXPECT_FALSE(acl2.matches(v6addr_2_51));
+    EXPECT_FALSE(acl2.matches(v6addr_2_50));
+    EXPECT_FALSE(acl2.matches(v6addr_2_49));
+    EXPECT_FALSE(acl2.matches(v6addr_2_48));
+    EXPECT_FALSE(acl2.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/52"));
+    EXPECT_TRUE(acl3.matches(v6addr_2));
+    EXPECT_TRUE(acl3.matches(v6addr_2_127));
+    EXPECT_TRUE(acl3.matches(v6addr_2_52));
+    EXPECT_FALSE(acl3.matches(v6addr_2_51));
+    EXPECT_FALSE(acl3.matches(v6addr_2_50));
+    EXPECT_FALSE(acl3.matches(v6addr_2_49));
+    EXPECT_FALSE(acl3.matches(v6addr_2_48));
+    EXPECT_FALSE(acl3.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl4(string(V6ADDR_2_STRING) + string("/51"));
+    EXPECT_TRUE(acl4.matches(v6addr_2));
+    EXPECT_TRUE(acl4.matches(v6addr_2_127));
+    EXPECT_TRUE(acl4.matches(v6addr_2_52));
+    EXPECT_TRUE(acl4.matches(v6addr_2_51));
+    EXPECT_FALSE(acl4.matches(v6addr_2_50));
+    EXPECT_FALSE(acl4.matches(v6addr_2_49));
+    EXPECT_FALSE(acl4.matches(v6addr_2_48));
+    EXPECT_FALSE(acl4.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl5(string(V6ADDR_2_STRING) + string("/50"));
+    EXPECT_TRUE(acl5.matches(v6addr_2));
+    EXPECT_TRUE(acl5.matches(v6addr_2_127));
+    EXPECT_TRUE(acl5.matches(v6addr_2_52));
+    EXPECT_TRUE(acl5.matches(v6addr_2_51));
+    EXPECT_TRUE(acl5.matches(v6addr_2_50));
+    EXPECT_FALSE(acl5.matches(v6addr_2_49));
+    EXPECT_FALSE(acl5.matches(v6addr_2_48));
+    EXPECT_FALSE(acl5.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl6(string(V6ADDR_2_STRING) + string("/0"));
+    EXPECT_TRUE(acl6.matches(v6addr_2));
+    EXPECT_TRUE(acl6.matches(v6addr_2_127));
+    EXPECT_TRUE(acl6.matches(v6addr_2_52));
+    EXPECT_TRUE(acl6.matches(v6addr_2_51));
+    EXPECT_TRUE(acl6.matches(v6addr_2_50));
+    EXPECT_TRUE(acl6.matches(v6addr_2_49));
+    EXPECT_TRUE(acl6.matches(v6addr_2_48));
+    EXPECT_TRUE(acl6.matches(v6addr_3));
+
+    // Match on any address
+    IPCheck<GeneralAddress> acl7("any6");
+    EXPECT_TRUE(acl7.matches(v6addr_2));
+    EXPECT_TRUE(acl7.matches(v6addr_2_127));
+    EXPECT_TRUE(acl7.matches(v6addr_2_52));
+    EXPECT_TRUE(acl7.matches(v6addr_2_51));
+    EXPECT_TRUE(acl7.matches(v6addr_2_50));
+    EXPECT_TRUE(acl7.matches(v6addr_2_49));
+    EXPECT_TRUE(acl7.matches(v6addr_2_48));
+}
+
+// *** Mixed-mode tests - mainly to check that no exception is thrown ***
+
+TEST(IPCheck, MixedMode) {
+
+    // ACL has a V4 address specified, check against a V6 address.
+    IPCheck<GeneralAddress> acl1("192.0.2.255/24");
+    GeneralAddress test1(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+    EXPECT_NO_THROW(acl1.matches(test1));
+    EXPECT_FALSE(acl1.matches(test1));
+
+    // Now the reverse - the ACL is specified with a V6 address.
+    IPCheck<GeneralAddress> acl2(V6ADDR_2_STRING);
+    GeneralAddress test2(0x12345678);
+    EXPECT_FALSE(acl2.matches(test2));
+
+    // Ensure only a V4 address matches "any4".
+    IPCheck<GeneralAddress> acl3("any4");
+    EXPECT_FALSE(acl3.matches(test1));
+    EXPECT_TRUE(acl3.matches(test2));
+
+    // ... and check the reverse
+    IPCheck<GeneralAddress> acl4("any6");
+    EXPECT_TRUE(acl4.matches(test1));
+    EXPECT_FALSE(acl4.matches(test2));
+
+    // Check where the bit pattern of an IPv4 address matches that of an IPv6
+    // one.
+    IPCheck<GeneralAddress> acl5("2001:db8::/32");
+    GeneralAddress test5(0x20010db8);
+    EXPECT_FALSE(acl5.matches(test5));
+
+    // ... and where the reverse is true. (2001:db8 corresponds to 32.1.13.184).
+    IPCheck<GeneralAddress> acl6("32.1.13.184");
+    GeneralAddress test6(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+    EXPECT_FALSE(acl6.matches(test6));
+}
+} // Unnamed namespace
diff --git a/src/lib/acl/tests/loader_test.cc b/src/lib/acl/tests/loader_test.cc
index 92d40a8..7dc088d 100644
--- a/src/lib/acl/tests/loader_test.cc
+++ b/src/lib/acl/tests/loader_test.cc
@@ -12,22 +12,16 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include "logcheck.h"
+#include "creators.h"
 #include <acl/loader.h>
 #include <string>
 #include <gtest/gtest.h>
 
 using namespace std;
 using namespace boost;
-using isc::data::ConstElementPtr;
 
 namespace {
 
-// Just for convenience, create JSON objects from JSON string
-ConstElementPtr el(const string& JSON) {
-    return (isc::data::Element::fromJSON(JSON));
-}
-
 // We don't use the EXPECT_THROW macro, as it doesn't allow us
 // to examine the exception. We want to check the element is stored
 // there as well.
@@ -61,122 +55,6 @@ TEST(LoaderHelpers, DefaultActionLoader) {
     testActionLoaderException("{}");
 }
 
-// A check that doesn't check anything but remembers it's own name
-// and data
-class NamedCheck : public Check<Log> {
-public:
-    NamedCheck(const string& name, ConstElementPtr data) :
-        name_(name),
-        data_(data)
-    {}
-    virtual bool matches(const Log&) const { return (true); }
-    const string name_;
-    const ConstElementPtr data_;
-};
-
-// The creator of NamedCheck
-class NamedCreator : public Loader<Log>::CheckCreator {
-public:
-    NamedCreator(const string& name, bool abbreviatedList = true) :
-        abbreviated_list_(abbreviatedList)
-    {
-        names_.push_back(name);
-    }
-    NamedCreator(const vector<string>& names) :
-        names_(names),
-        abbreviated_list_(true)
-    {}
-    vector<string> names() const {
-        return (names_);
-    }
-    shared_ptr<Check<Log> > create(const string& name, ConstElementPtr data,
-                                   const Loader<Log>&)
-    {
-        bool found(false);
-        for (vector<string>::const_iterator i(names_.begin());
-             i != names_.end(); ++i) {
-            if (*i == name) {
-                found = true;
-                break;
-            }
-        }
-        EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
-            "doesn't handle it.";
-        return (shared_ptr<Check<Log> >(new NamedCheck(name, data)));
-    }
-    bool allowListAbbreviation() const {
-        return (abbreviated_list_);
-    }
-private:
-    vector<string> names_;
-    const bool abbreviated_list_;
-};
-
-// To be thrown in tests internally
-class TestCreatorError {};
-
-// This will throw every time it should create something
-class ThrowCreator : public Loader<Log>::CheckCreator {
-public:
-    vector<string> names() const {
-        vector<string> result;
-        result.push_back("throw");
-        return (result);
-    }
-    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
-                                   const Loader<Log>&)
-    {
-        throw TestCreatorError();
-    }
-};
-
-// This throws whenever the match is called on it
-class ThrowCheck : public Check<Log> {
-public:
-    virtual bool matches(const Log&) const {
-        throw TestCreatorError();
-    }
-};
-
-// And creator for it
-class ThrowCheckCreator : public Loader<Log>::CheckCreator {
-public:
-    vector<string> names() const {
-        vector<string> result;
-        result.push_back("throwcheck");
-        return (result);
-    }
-    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
-                                   const Loader<Log>&)
-    {
-        return (shared_ptr<Check<Log> >(new ThrowCheck()));
-    }
-};
-
-class LogCreator : public Loader<Log>::CheckCreator {
-public:
-    vector<string> names() const {
-        vector<string> result;
-        result.push_back("logcheck");
-        return (result);
-    }
-    /*
-     * For simplicity, we just take two values as a list, first is the
-     * logging cell used, the second is result of the check. No error checking
-     * is done, if there's bug in the test, it will throw TypeError for us.
-     */
-    shared_ptr<Check<Log> > create(const string&, ConstElementPtr definition,
-                                   const Loader<Log>&)
-    {
-        vector<ConstElementPtr> list(definition->listValue());
-        int logpos(list[0]->intValue());
-        bool accept(list[1]->boolValue());
-        return (shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
-    }
-    // We take a list, so don't interpret it for us
-    virtual bool allowListAbbreviation() const { return (false); }
-};
-
 class LoaderTest : public ::testing::Test {
 public:
     LoaderTest() :
@@ -198,16 +76,21 @@ public:
         EXPECT_NO_THROW(loader_.registerCreator(
             namedCreator(name, abbreviatedList)));
     }
-    // Load a check and convert it to named check to examine it
-    shared_ptr<NamedCheck> loadCheck(const string& definition) {
+    template<class Result> shared_ptr<Result> loadCheckAny(const string&
+                                                               definition)
+    {
         SCOPED_TRACE("Loading check " + definition);
         shared_ptr<Check<Log> > loaded;
         EXPECT_NO_THROW(loaded = loader_.loadCheck(el(definition)));
-        shared_ptr<NamedCheck> result(dynamic_pointer_cast<NamedCheck>(
+        shared_ptr<Result> result(dynamic_pointer_cast<Result>(
             loaded));
         EXPECT_TRUE(result);
         return (result);
     }
+    // Load a check and convert it to named check to examine it
+    shared_ptr<NamedCheck> loadCheck(const string& definition) {
+        return (loadCheckAny<NamedCheck>(definition));
+    }
     // The loadCheck throws an exception
     void checkException(const string& JSON) {
         SCOPED_TRACE("Loading check exception: " + JSON);
@@ -255,6 +138,20 @@ public:
         aclSetup();
         EXPECT_THROW(loader_.load(el(JSON)), LoaderError);
     }
+    // Check that the subexpression is NamedCheck with correct data
+    void isSubexprNamed(const CompoundCheck<Log>* compound, size_t index,
+                        const string& name, ConstElementPtr data)
+    {
+        if (index < compound->getSubexpressions().size()) {
+            const NamedCheck*
+                check(dynamic_cast<const NamedCheck*>(compound->
+                                                      getSubexpressions()
+                                                      [index]));
+            ASSERT_TRUE(check) << "The subexpression is of different type";
+            EXPECT_EQ(name, check->name_);
+            EXPECT_TRUE(data->equals(*check->data_));
+        }
+    }
 };
 
 // Test that it does not accept duplicate creator
@@ -331,19 +228,67 @@ TEST_F(LoaderTest, CheckPropagate) {
     EXPECT_THROW(loader_.loadCheck(el("{\"throw\": null}")), TestCreatorError);
 }
 
-// The abbreviated form is not yet implemented
-// (we need the operators to be implemented)
+// The abbreviated form of check
 TEST_F(LoaderTest, AndAbbrev) {
     addNamed("name1");
     addNamed("name2");
-    EXPECT_THROW(loader_.loadCheck(el("{\"name1\": 1, \"name2\": 2}")),
-                 LoaderError);
+    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": 2}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        EXPECT_EQ(2, oper->getSubexpressions().size());
+        // Note: this test relies on the ordering in which the map returns its
+        // elements, which is the lexicographical order of the strings.
+        // This is not required by our interface, but it makes the test
+        // easier to write.
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        isSubexprNamed(&*oper, 1, "name2", el("2"));
+    }
 }
 
+// The abbreviated form of parameters
 TEST_F(LoaderTest, OrAbbrev) {
     addNamed("name1");
-    EXPECT_THROW(loader_.loadCheck(el("{\"name1\": [1, 2]}")),
-                 LoaderError);
+    shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AnyOfSpec, Log> >("{\"name1\": [1, 2]}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        EXPECT_EQ(2, oper->getSubexpressions().size());
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        isSubexprNamed(&*oper, 1, "name1", el("2"));
+    }
+}
+
+// Combined abbreviated form: both the multiple-entry (AND) and the
+// list-valued (OR) abbreviations at once
+TEST_F(LoaderTest, BothAbbrev) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": [3, 4]}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        ASSERT_EQ(2, oper->getSubexpressions().size());
+        // Note: this test relies on the ordering in which the map returns its
+        // elements, which is the lexicographical order of the strings.
+        // This is not required by our interface, but it makes the test
+        // easier to write.
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        const LogicOperator<AnyOfSpec, Log>*
+            orOper(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>(
+            oper->getSubexpressions()[1]));
+        ASSERT_TRUE(orOper) << "Different type than AnyOf operator";
+        EXPECT_EQ(2, orOper->getSubexpressions().size());
+        isSubexprNamed(orOper, 0, "name2", el("3"));
+        isSubexprNamed(orOper, 1, "name2", el("4"));
+    }
 }
 
 // But this is not abbreviated form, this should be passed directly to the
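The abbreviated forms exercised by the tests above expand as follows: a map with
several entries becomes an ALL (AND) operator over the individual named checks,
while a list value under a single name becomes an ANY (OR) operator over that
check, one subexpression per list element. The following self-contained sketch
illustrates that expansion with simple stand-in types; it only illustrates the
rule the tests verify and is not code from the ACL library (a one-element vector
stands in for a scalar value here).

    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>
    #include <vector>

    // name -> one or more parameter values (hypothetical stand-in for the
    // parsed JSON map the loader sees)
    typedef std::map<std::string, std::vector<int> > Abbrev;

    std::string expand(const Abbrev& abbrev) {
        std::ostringstream out;
        const bool wrap_all = abbrev.size() > 1;   // several entries -> ALL
        if (wrap_all) {
            out << "ALL(";
        }
        for (Abbrev::const_iterator it = abbrev.begin(); it != abbrev.end();
             ++it) {
            if (it != abbrev.begin()) {
                out << ", ";
            }
            if (it->second.size() > 1) {           // list value -> ANY
                out << "ANY(";
                for (size_t i = 0; i < it->second.size(); ++i) {
                    if (i > 0) {
                        out << ", ";
                    }
                    out << it->first << "=" << it->second[i];
                }
                out << ")";
            } else {                               // scalar -> plain named check
                out << it->first << "=" << it->second.at(0);
            }
        }
        if (wrap_all) {
            out << ")";
        }
        return (out.str());
    }

    int main() {
        Abbrev both;
        both["name1"].push_back(1);
        both["name2"].push_back(3);
        both["name2"].push_back(4);
        // Prints ALL(name1=1, ANY(name2=3, name2=4)), mirroring the structure
        // the BothAbbrev test checks for {"name1": 1, "name2": [3, 4]}.
        std::cout << expand(both) << std::endl;
        return (0);
    }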
diff --git a/src/lib/acl/tests/logcheck.h b/src/lib/acl/tests/logcheck.h
index c5e1bb1..776ff53 100644
--- a/src/lib/acl/tests/logcheck.h
+++ b/src/lib/acl/tests/logcheck.h
@@ -12,6 +12,9 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#ifndef LOGCHECK_H
+#define LOGCHECK_H
+
 #include <gtest/gtest.h>
 #include <acl/acl.h>
 #include <cassert>
@@ -84,3 +87,5 @@ private:
 };
 
 }
+
+#endif
diff --git a/src/lib/acl/tests/logic_check_test.cc b/src/lib/acl/tests/logic_check_test.cc
new file mode 100644
index 0000000..c4b00eb
--- /dev/null
+++ b/src/lib/acl/tests/logic_check_test.cc
@@ -0,0 +1,208 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/logic_check.h>
+
+using namespace isc::acl;
+
+namespace {
+
+// Test the defs in AnyOfSpec
+TEST(LogicOperators, AnyOfSpec) {
+    EXPECT_FALSE(AnyOfSpec::start());
+    EXPECT_FALSE(AnyOfSpec::terminate(false));
+    EXPECT_TRUE(AnyOfSpec::terminate(true));
+}
+
+// Test the defs in AllOfSpec
+TEST(LogicOperators, AllOfSpec) {
+    EXPECT_TRUE(AllOfSpec::start());
+    EXPECT_TRUE(AllOfSpec::terminate(false));
+    EXPECT_FALSE(AllOfSpec::terminate(true));
+}
+
+// Generic test of one check
+template<typename Mode>
+void
+testCheck(bool emptyResult) {
+    // It can be created
+    LogicOperator<Mode, Log> oper;
+    // It is empty by default
+    EXPECT_EQ(0, oper.getSubexpressions().size());
+    // And it returns the mode's neutral value, as there are no subexpressions yet
+    Log log;
+    EXPECT_EQ(emptyResult, oper.matches(log));
+    log.checkFirst(0);
+    // Fill it with some subexpressions
+    typedef shared_ptr<ConstCheck> CheckPtr;
+    oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 0)));
+    oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 1)));
+    // Check what happens when only subexpressions returning the neutral value are there
+    EXPECT_EQ(2, oper.getSubexpressions().size());
+    EXPECT_EQ(emptyResult, oper.matches(log));
+    log.checkFirst(2);
+    oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 2)));
+    oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 3)));
+    // They are listed there
+    EXPECT_EQ(4, oper.getSubexpressions().size());
+    // Now the third one terminates the evaluation, so the first three run
+    // but the fourth won't
+    EXPECT_EQ(!emptyResult, oper.matches(log));
+    log.checkFirst(3);
+}
+
+TEST(LogicOperators, AllOf) {
+    testCheck<AllOfSpec>(true);
+}
+
+TEST(LogicOperators, AnyOf) {
+    testCheck<AnyOfSpec>(false);
+}
+
+// Fixture for the tests of the creators
+class LogicCreatorTest : public ::testing::Test {
+private:
+    typedef shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
+public:
+    // Register some creators, both tested ones and some auxiliary ones for
+    // help
+    LogicCreatorTest():
+        loader_(REJECT)
+    {
+        loader_.registerCreator(CreatorPtr(new
+            LogicCreator<AnyOfSpec, Log>("ANY")));
+        loader_.registerCreator(CreatorPtr(new
+            LogicCreator<AllOfSpec, Log>("ALL")));
+        loader_.registerCreator(CreatorPtr(new ThrowCreator));
+        loader_.registerCreator(CreatorPtr(new LogCreator));
+    }
+    // To mark which parts of the check did run
+    Log log_;
+    // The loader
+    Loader<Log> loader_;
+    // Some convenience shortcut names
+    typedef LogicOperator<AnyOfSpec, Log> AnyOf;
+    typedef LogicOperator<AllOfSpec, Log> AllOf;
+    typedef shared_ptr<AnyOf> AnyOfPtr;
+    typedef shared_ptr<AllOf> AllOfPtr;
+    // Loads the JSON as a check and tries to convert it to the given check
+    // subclass
+    template<typename Result> shared_ptr<Result> load(const string& JSON) {
+        shared_ptr<Check<Log> > result;
+        EXPECT_NO_THROW(result = loader_.loadCheck(el(JSON)));
+        shared_ptr<Result>
+            resultConverted(dynamic_pointer_cast<Result>(result));
+        EXPECT_NE(shared_ptr<Result>(), resultConverted);
+        return (resultConverted);
+    }
+};
+
+// Test it can load empty ones
+TEST_F(LogicCreatorTest, empty) {
+    AnyOfPtr emptyAny(load<AnyOf>("{\"ANY\": []}"));
+    EXPECT_EQ(0, emptyAny->getSubexpressions().size());
+    AllOfPtr emptyAll(load<AllOf>("{\"ALL\": []}"));
+    EXPECT_EQ(0, emptyAll->getSubexpressions().size());
+}
+
+// Test it rejects invalid inputs (not a list as a parameter)
+TEST_F(LogicCreatorTest, invalid) {
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": null}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": {}}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": true}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": 42}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": \"hello\"}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": null}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": {}}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": true}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": 42}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": \"hello\"}")), LoaderError);
+}
+
+// Exceptions from subexpression creation aren't caught
+TEST_F(LogicCreatorTest, propagate) {
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": [{\"throw\": null}]}")),
+                 TestCreatorError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": [{\"throw\": null}]}")),
+                 TestCreatorError);
+}
+
+// We can create a more complex ANY check and run it correctly
+TEST_F(LogicCreatorTest, anyRun) {
+    AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+                             "    {\"logcheck\": [0, false]},"
+                             "    {\"logcheck\": [1, true]},"
+                             "    {\"logcheck\": [2, true]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_TRUE(any->matches(log_));
+    log_.checkFirst(2);
+}
+
+// We can create more complex ALL check and run it correctly
+TEST_F(LogicCreatorTest, allRun) {
+    AllOfPtr any(load<AllOf>("{\"ALL\": ["
+                             "    {\"logcheck\": [0, true]},"
+                             "    {\"logcheck\": [1, false]},"
+                             "    {\"logcheck\": [2, false]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_FALSE(any->matches(log_));
+    log_.checkFirst(2);
+}
+
+// Or is able to return false
+TEST_F(LogicCreatorTest, anyFalse) {
+    AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+                             "    {\"logcheck\": [0, false]},"
+                             "    {\"logcheck\": [1, false]},"
+                             "    {\"logcheck\": [2, false]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_FALSE(any->matches(log_));
+    log_.checkFirst(3);
+}
+
+// And is able to return true
+TEST_F(LogicCreatorTest, andTrue) {
+    AllOfPtr all(load<AllOf>("{\"ALL\": ["
+                             "    {\"logcheck\": [0, true]},"
+                             "    {\"logcheck\": [1, true]},"
+                             "    {\"logcheck\": [2, true]}"
+                             "]}"));
+    EXPECT_EQ(3, all->getSubexpressions().size());
+    EXPECT_TRUE(all->matches(log_));
+    log_.checkFirst(3);
+}
+
+// We can nest them together
+TEST_F(LogicCreatorTest, nested) {
+    AllOfPtr all(load<AllOf>("{\"ALL\": ["
+                             "    {\"ANY\": ["
+                             "        {\"logcheck\": [0, true]},"
+                             "        {\"logcheck\": [2, true]},"
+                             "    ]},"
+                             "    {\"logcheck\": [1, false]}"
+                             "]}"));
+    EXPECT_EQ(2, all->getSubexpressions().size());
+    const LogicOperator<AnyOfSpec, Log>*
+        any(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>
+            (all->getSubexpressions()[0]));
+    ASSERT_TRUE(any) << "Different type than AnyOf operator";
+    EXPECT_EQ(2, any->getSubexpressions().size());
+    EXPECT_FALSE(all->matches(log_));
+    log_.checkFirst(2);
+}
+
+}
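The AnyOfSpec/AllOfSpec tests at the top of this new file pin down the
start()/terminate() contract: start() is the value of an empty operator and
terminate() says whether evaluation may stop after seeing a given subexpression
result. The sketch below shows how such a policy can drive a short-circuiting
evaluator; it uses stand-in types and is only an illustration consistent with
those tests, not the LogicOperator implementation from logic_check.h.

    #include <cassert>
    #include <vector>

    // Stand-in policies mirroring the expectations tested above.
    struct AnyOfSpecSketch {
        static bool start() { return (false); }          // an empty ANY is false
        static bool terminate(bool ok) { return (ok); }   // stop on the first true
    };

    struct AllOfSpecSketch {
        static bool start() { return (true); }            // an empty ALL is true
        static bool terminate(bool ok) { return (!ok); }  // stop on the first false
    };

    // Hypothetical short-circuiting evaluation over precomputed subexpression
    // results (the real operator would call matches() on each subexpression).
    template<typename Mode>
    bool evaluate(const std::vector<bool>& results) {
        bool value = Mode::start();
        for (size_t i = 0; i < results.size(); ++i) {
            value = results[i];
            if (Mode::terminate(value)) {
                break;                                     // short-circuit
            }
        }
        return (value);
    }

    int main() {
        std::vector<bool> mixed;
        mixed.push_back(true);
        mixed.push_back(false);
        assert(evaluate<AnyOfSpecSketch>(mixed));          // one true is enough
        assert(!evaluate<AllOfSpecSketch>(mixed));         // one false is fatal
        return (0);
    }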
diff --git a/src/lib/acl/tests/run_unittests.cc b/src/lib/acl/tests/run_unittests.cc
index 61df6cf..8dc59a2 100644
--- a/src/lib/acl/tests/run_unittests.cc
+++ b/src/lib/acl/tests/run_unittests.cc
@@ -13,11 +13,12 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <gtest/gtest.h>
+#include <log/logger_support.h>
 #include <util/unittests/run_all.h>
 
 int
 main(int argc, char* argv[]) {
     ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
     return (isc::util::unittests::run_all());
 }
-
diff --git a/src/lib/asiodns/Makefile.am b/src/lib/asiodns/Makefile.am
index 2a6c3ac..2d246ef 100644
--- a/src/lib/asiodns/Makefile.am
+++ b/src/lib/asiodns/Makefile.am
@@ -8,13 +8,13 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-CLEANFILES = *.gcno *.gcda asiodef.h asiodef.cc
+CLEANFILES = *.gcno *.gcda asiodns_messages.h asiodns_messages.cc
 
 # Define rule to build logging source files from message file
-asiodef.h asiodef.cc: asiodef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodef.mes
+asiodns_messages.h asiodns_messages.cc: asiodns_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodns_messages.mes
 
-BUILT_SOURCES = asiodef.h asiodef.cc
+BUILT_SOURCES = asiodns_messages.h asiodns_messages.cc
 
 lib_LTLIBRARIES = libasiodns.la
 libasiodns_la_SOURCES = dns_answer.h
@@ -26,9 +26,9 @@ libasiodns_la_SOURCES += tcp_server.cc tcp_server.h
 libasiodns_la_SOURCES += udp_server.cc udp_server.h
 libasiodns_la_SOURCES += io_fetch.cc io_fetch.h
 
-nodist_libasiodns_la_SOURCES = asiodef.cc asiodef.h
+nodist_libasiodns_la_SOURCES = asiodns_messages.cc asiodns_messages.h
 
-EXTRA_DIST = asiodef.mes
+EXTRA_DIST = asiodns_messages.mes
 
 # Note: the ordering matters: -Wno-... must follow -Wextra (defined in
 # B10_CXXFLAGS)
diff --git a/src/lib/asiodns/asiodef.mes b/src/lib/asiodns/asiodef.mes
deleted file mode 100644
index 4f4090d..0000000
--- a/src/lib/asiodns/asiodef.mes
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX ASIODNS_
-$NAMESPACE isc::asiodns
-
-% FETCHCOMP   upstream fetch to %1(%2) has now completed
-A debug message, this records that the upstream fetch (a query made by the
-resolver on behalf of its client) to the specified address has completed.
-
-% FETCHSTOP   upstream fetch to %1(%2) has been stopped
-An external component has requested the halting of an upstream fetch.  This
-is an allowed operation, and the message should only appear if debug is
-enabled.
-
-% OPENSOCK    error %1 opening %2 socket to %3(%4)
-The asynchronous I/O code encountered an error when trying to open a socket
-of the specified protocol in order to send a message to the target address.
-The number of the system error that cause the problem is given in the
-message.
-
-% RECVSOCK    error %1 reading %2 data from %3(%4)
-The asynchronous I/O code encountered an error when trying to read data from
-the specified address on the given protocol.  The number of the system
-error that cause the problem is given in the message.
-
-% SENDSOCK    error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-
-% RECVTMO     receive timeout while waiting for data from %1(%2)
-An upstream fetch from the specified address timed out.  This may happen for
-any number of reasons and is most probably a problem at the remote server
-or a problem on the network.  The message will only appear if debug is
-enabled.
-
-% UNKORIGIN  unknown origin for ASIO error code %1 (protocol: %2, address %3)
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-
-% UNKRESULT  unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message).  This message should
-not appear and may indicate an internal error.  Please enter a bug report.
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
new file mode 100644
index 0000000..3e11ede
--- /dev/null
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -0,0 +1,56 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::asiodns
+
+% ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+
+% ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+
+% ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+
+% ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+
+% ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+
+% ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message).  Please submit a bug report.
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 4b2edf9..31b5f50 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -41,7 +41,7 @@
 #include <log/logger.h>
 #include <log/macros.h>
 
-#include <asiodns/asiodef.h>
+#include <asiodns/asiodns_messages.h>
 #include <asiodns/io_fetch.h>
 
 #include <util/buffer.h>
@@ -158,7 +158,7 @@ struct IOFetchData {
         stopped(false),
         timeout(wait),
         packet(false),
-        origin(ASIODNS_UNKORIGIN),
+        origin(ASIODNS_UNKNOWN_ORIGIN),
         staging(),
         qid(QidGenerator::getInstance().generateQid())
     {}
@@ -280,7 +280,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
 
         // Open a connection to the target system.  For speed, if the operation
         // is synchronous (i.e. UDP operation) we bypass the yield.
-        data_->origin = ASIODNS_OPENSOCK;
+        data_->origin = ASIODNS_OPEN_SOCKET;
         if (data_->socket->isOpenSynchronous()) {
             data_->socket->open(data_->remote_snd.get(), *this);
         } else {
@@ -290,7 +290,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
         do {
             // Begin an asynchronous send, and then yield.  When the send completes,
             // we will resume immediately after this point.
-            data_->origin = ASIODNS_SENDSOCK;
+            data_->origin = ASIODNS_SEND_DATA;
             CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
                 data_->msgbuf->getLength(), data_->remote_snd.get(), *this);
     
@@ -313,7 +313,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
             // received all the data before copying it back to the user's buffer.
             // And we want to minimise the amount of copying...
     
-            data_->origin = ASIODNS_RECVSOCK;
+            data_->origin = ASIODNS_READ_DATA;
             data_->cumulative = 0;          // No data yet received
             data_->offset = 0;              // First data into start of buffer
             data_->received->clear();       // Clear the receive buffer
@@ -329,7 +329,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
 
         // Finished with this socket, so close it.  This will not generate an
         // I/O error, but reset the origin to unknown in case we change this.
-        data_->origin = ASIODNS_UNKORIGIN;
+        data_->origin = ASIODNS_UNKNOWN_ORIGIN;
         data_->socket->close();
 
         /// We are done
@@ -367,13 +367,13 @@ IOFetch::stop(Result result) {
         data_->stopped = true;
         switch (result) {
             case TIME_OUT:
-                LOG_DEBUG(logger, DBG_COMMON, ASIODNS_RECVTMO).
+                LOG_DEBUG(logger, DBG_COMMON, ASIODNS_READ_TIMEOUT).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
                 break;
 
             case SUCCESS:
-                LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCHCOMP).
+                LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCH_COMPLETED).
                     arg(data_->remote_rcv->getAddress().toText()).
                     arg(data_->remote_rcv->getPort());
                 break;
@@ -382,13 +382,13 @@ IOFetch::stop(Result result) {
                 // Fetch has been stopped for some other reason.  This is
                 // allowed but as it is unusual it is logged, but with a lower
                 // debug level than a timeout (which is totally normal).
-                LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCHSTOP).
+                LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCH_STOPPED).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
                 break;
 
             default:
-                LOG_ERROR(logger, ASIODNS_UNKRESULT).
+                LOG_ERROR(logger, ASIODNS_UNKNOWN_RESULT).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
         }
@@ -412,10 +412,10 @@ IOFetch::stop(Result result) {
 void IOFetch::logIOFailure(asio::error_code ec) {
 
     // Should only get here with a known error code.
-    assert((data_->origin == ASIODNS_OPENSOCK) ||
-           (data_->origin == ASIODNS_SENDSOCK) ||
-           (data_->origin == ASIODNS_RECVSOCK) ||
-           (data_->origin == ASIODNS_UNKORIGIN));
+    assert((data_->origin == ASIODNS_OPEN_SOCKET) ||
+           (data_->origin == ASIODNS_SEND_DATA) ||
+           (data_->origin == ASIODNS_READ_DATA) ||
+           (data_->origin == ASIODNS_UNKNOWN_ORIGIN));
 
     static const char* PROTOCOL[2] = {"TCP", "UDP"};
     LOG_ERROR(logger, data_->origin).arg(ec.value()).
diff --git a/src/lib/cc/data.h b/src/lib/cc/data.h
index 0a363f4..5c731e6 100644
--- a/src/lib/cc/data.h
+++ b/src/lib/cc/data.h
@@ -479,7 +479,7 @@ public:
         return (true);
     }
     using Element::setValue;
-    bool setValue(std::map<std::string, ConstElementPtr>& v) {
+    bool setValue(const std::map<std::string, ConstElementPtr>& v) {
         m = v;
         return (true);
     }
diff --git a/src/lib/config/Makefile.am b/src/lib/config/Makefile.am
index 52337ad..500ff12 100644
--- a/src/lib/config/Makefile.am
+++ b/src/lib/config/Makefile.am
@@ -6,10 +6,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 
 # Define rule to build logging source files from message file
-configdef.h configdef.cc: configdef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/configdef.mes
+config_messages.h config_messages.cc: config_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/config_messages.mes
 
-BUILT_SOURCES = configdef.h configdef.cc
+BUILT_SOURCES = config_messages.h config_messages.cc
 
 lib_LTLIBRARIES = libcfgclient.la
 libcfgclient_la_SOURCES = config_data.h config_data.cc
@@ -17,9 +17,9 @@ libcfgclient_la_SOURCES += module_spec.h module_spec.cc
 libcfgclient_la_SOURCES += ccsession.cc ccsession.h
 libcfgclient_la_SOURCES += config_log.h config_log.cc
 
-nodist_libcfgclient_la_SOURCES  = configdef.h configdef.cc
+nodist_libcfgclient_la_SOURCES  = config_messages.h config_messages.cc
 
 # The message file should be in the distribution.
-EXTRA_DIST = configdef.mes
+EXTRA_DIST = config_messages.mes
 
-CLEANFILES = *.gcno *.gcda configdef.h configdef.cc
+CLEANFILES = *.gcno *.gcda config_messages.h config_messages.cc
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index dd2be3d..6b094ec 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -23,6 +23,7 @@
 #include <fstream>
 #include <sstream>
 #include <cerrno>
+#include <set>
 
 #include <boost/bind.hpp>
 #include <boost/foreach.hpp>
@@ -38,6 +39,7 @@
 #include <log/logger_support.h>
 #include <log/logger_specification.h>
 #include <log/logger_manager.h>
+#include <log/logger_name.h>
 
 using namespace std;
 
@@ -213,7 +215,8 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
                 ConstElementPtr logger,
                 const ConfigData& config_data)
 {
-    const std::string lname = logger->get("name")->stringValue();
+    std::string lname = logger->get("name")->stringValue();
+
     ConstElementPtr severity_el = getValueOrDefault(logger,
                                       "severity", config_data,
                                       "loggers/severity");
@@ -246,6 +249,50 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
 
 } // end anonymous namespace
 
+
+ConstElementPtr
+getRelatedLoggers(ConstElementPtr loggers) {
+    // Keep a list of names for easier lookup later
+    std::set<std::string> our_names;
+    const std::string& root_name = isc::log::getRootLoggerName();
+
+    ElementPtr result = isc::data::Element::createList();
+
+    BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+        const std::string cur_name = cur_logger->get("name")->stringValue();
+        if (cur_name == root_name || cur_name.find(root_name + ".") == 0) {
+            our_names.insert(cur_name);
+            result->add(cur_logger);
+        }
+    }
+
+    // now find the * names
+    BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+        std::string cur_name = cur_logger->get("name")->stringValue();
+        // if name is '*', or starts with '*.', replace * with root
+        // logger name
+        if (cur_name == "*" || (cur_name.length() > 1 &&
+            cur_name[0] == '*' && cur_name[1] == '.')) {
+
+            cur_name = root_name + cur_name.substr(1);
+            // now add it to the result list, but only if a logger with
+            // that name was not configured explicitly
+            if (our_names.find(cur_name) == our_names.end()) {
+                // we substitute the name here already, but as
+                // we are dealing with consts, we copy the data
+                ElementPtr new_logger(Element::createMap());
+                // since we'll only be updating one first-level element,
+                // and we return as const again, a shallow map copy is
+                // enough
+                new_logger->setValue(cur_logger->mapValue());
+                new_logger->set("name", Element::create(cur_name));
+                result->add(new_logger);
+            }
+        }
+    }
+    return (result);
+}
+
 void
 default_logconfig_handler(const std::string& module_name,
                           ConstElementPtr new_config,
@@ -255,8 +302,9 @@ default_logconfig_handler(const std::string& module_name,
     std::vector<isc::log::LoggerSpecification> specs;
 
     if (new_config->contains("loggers")) {
+        ConstElementPtr loggers = getRelatedLoggers(new_config->get("loggers"));
         BOOST_FOREACH(ConstElementPtr logger,
-                      new_config->get("loggers")->listValue()) {
+                      loggers->listValue()) {
             readLoggersConf(specs, logger, config_data);
         }
     }
@@ -274,7 +322,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
     // this file should be declared in a @something@ directive
     file.open(filename.c_str());
     if (!file) {
-        LOG_ERROR(config_logger, CONFIG_FOPEN_ERR).arg(filename).arg(strerror(errno));
+        LOG_ERROR(config_logger, CONFIG_OPEN_FAIL).arg(filename).arg(strerror(errno));
         isc_throw(CCSessionInitError, strerror(errno));
     }
 
@@ -284,7 +332,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
         LOG_ERROR(config_logger, CONFIG_JSON_PARSE).arg(filename).arg(pe.what());
         isc_throw(CCSessionInitError, pe.what());
     } catch (const ModuleSpecError& dde) {
-        LOG_ERROR(config_logger, CONFIG_MODULE_SPEC).arg(filename).arg(dde.what());
+        LOG_ERROR(config_logger, CONFIG_MOD_SPEC_FORMAT).arg(filename).arg(dde.what());
         isc_throw(CCSessionInitError, dde.what());
     }
     file.close();
@@ -334,7 +382,7 @@ ModuleCCSession::ModuleCCSession(
     int rcode;
     ConstElementPtr err = parseAnswer(rcode, answer);
     if (rcode != 0) {
-        LOG_ERROR(config_logger, CONFIG_MANAGER_MOD_SPEC).arg(answer->str());
+        LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
         isc_throw(CCSessionInitError, answer->str());
     }
     
@@ -348,7 +396,7 @@ ModuleCCSession::ModuleCCSession(
         if (rcode == 0) {
             handleConfigUpdate(new_config);
         } else {
-            LOG_ERROR(config_logger, CONFIG_MANAGER_CONFIG).arg(new_config->str());
+            LOG_ERROR(config_logger, CONFIG_GET_FAIL).arg(new_config->str());
             isc_throw(CCSessionInitError, answer->str());
         }
     }
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index 0d4b7f3..7dc34ba 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -373,8 +373,41 @@ default_logconfig_handler(const std::string& module_name,
                           isc::data::ConstElementPtr new_config,
                           const ConfigData& config_data);
 
-}
-}
+
+/// \brief Returns the loggers related to this module
+///
+/// This function does two things:
+/// - it drops the configuration parts for loggers of other modules
+/// - it replaces the '*' in the names of the loggers by the name of
+///   this module, but *only* if the expanded name is not configured
+///   explicitly
+///
+/// Examples: if this is the module b10-resolver,
+/// For the config names ['*', 'b10-auth']
+/// The '*' is replaced with 'b10-resolver', and this logger is used.
+/// 'b10-auth' is ignored (in the b10-auth module itself it would, of
+/// course, be used).
+///
+/// For ['*', 'b10-resolver']
+/// The '*' is ignored, and only 'b10-resolver' is used.
+///
+/// For ['*.reslib', 'b10-resolver']
+/// Or ['b10-resolver.reslib', '*']
+/// Both are used, where the '*' will be expanded to 'b10-resolver'.
+///
+/// \note This is a public function at this time, but mostly for
+/// the purposes of testing. Once we can directly test what loggers
+/// are running, this function may be moved to the unnamed namespace
+///
+/// \param loggers the original 'loggers' config list
+/// \return ListElement containing only loggers relevant for this
+///         module, where * is replaced by the root logger name
+isc::data::ConstElementPtr
+getRelatedLoggers(isc::data::ConstElementPtr loggers);
+
+} // namespace config
+
+} // namespace isc
 #endif // __CCSESSION_H
 
 // Local Variables:
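To make the behaviour documented for getRelatedLoggers() above more concrete,
here is a hypothetical usage sketch. It assumes the BIND 10 cc and config
libraries are available under the usual include paths and that the root logger
name is the default "bind10" noted in the unit tests further below; it is not
part of the patch itself.

    #include <cc/data.h>
    #include <config/ccsession.h>

    #include <iostream>

    int main() {
        using isc::data::ConstElementPtr;
        using isc::data::Element;

        // One wildcard entry and one entry for an unrelated module.
        ConstElementPtr loggers = Element::fromJSON(
            "[{\"name\": \"*\", \"severity\": \"DEBUG\"},"
            " {\"name\": \"other_module\", \"severity\": \"WARN\"}]");

        // Per the documentation above, the '*' entry is rewritten to the root
        // logger name and the unrelated entry is dropped, so this should print
        // something like [{"name": "bind10", "severity": "DEBUG"}].
        std::cout << isc::config::getRelatedLoggers(loggers)->str() << std::endl;
        return (0);
    }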
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 22e5a5c..0063855 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -16,7 +16,7 @@
 #define __CONFIG_LOG__H
 
 #include <log/macros.h>
-#include "configdef.h"
+#include "config_messages.h"
 
 namespace isc {
 namespace config {
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
new file mode 100644
index 0000000..660ab9a
--- /dev/null
+++ b/src/lib/config/config_messages.mes
@@ -0,0 +1,59 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::config
+
+% CONFIG_CCSESSION_MSG error in CC session message: %1
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+
+% CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+
+The most likely cause of this error is a programming error.  Please raise
+a bug report.
+
+% CONFIG_GET_FAIL error getting configuration from cfgmgr: %1
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+
+% CONFIG_JSON_PARSE JSON parse error in %1: %2
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+
+% CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+
+% CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+
+% CONFIG_OPEN_FAIL error opening %1: %2
+There was an error opening the given file. The reason for the failure
+is included in the message.
diff --git a/src/lib/config/configdef.mes b/src/lib/config/configdef.mes
deleted file mode 100644
index be39073..0000000
--- a/src/lib/config/configdef.mes
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX CONFIG_
-$NAMESPACE isc::config
-
-% FOPEN_ERR     error opening %1: %2
-There was an error opening the given file.
-
-% JSON_PARSE    JSON parse error in %1: %2
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-
-% MODULE_SPEC   module specification error in %1: %2
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
-
-% MANAGER_MOD_SPEC    module specification not accepted by cfgmgr: %1
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-
-% MANAGER_CONFIG    error getting configuration from cfgmgr: %1
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
-
-% CCSESSION_MSG error in CC session message: %1
-There was a problem with an incoming message on the command and control
-channel. The message does not appear to be a valid command, and is
-missing a required element or contains an unknown data format. This
-most likely means that another BIND10 module is sending a bad message.
-The message itself is ignored by this module.
-
-% CCSESSION_MSG_INTERNAL error handling CC session message: %1
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index e5fe049..e1a4f9d 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -24,6 +24,8 @@
 
 #include <config/tests/data_def_unittests_config.h>
 
+#include <log/logger_name.h>
+
 using namespace isc::data;
 using namespace isc::config;
 using namespace isc::cc;
@@ -632,4 +634,64 @@ TEST_F(CCSessionTest, doubleStartWithAddRemoteConfig) {
     EXPECT_THROW(mccs.addRemoteConfig(ccspecfile("spec2.spec")),
                  FakeSession::DoubleRead);
 }
+
+namespace {
+void doRelatedLoggersTest(const char* input, const char* expected) {
+    ConstElementPtr all_conf = isc::data::Element::fromJSON(input);
+    ConstElementPtr expected_conf = isc::data::Element::fromJSON(expected);
+    EXPECT_EQ(*expected_conf, *isc::config::getRelatedLoggers(all_conf));
+}
+} // end anonymous namespace
+
+TEST(LogConfigTest, relatedLoggersTest) {
+    // make sure logger configs for 'other' programs are ignored,
+    // and that * is substituted correctly
+    // The default root logger name is "bind10"
+    doRelatedLoggersTest("[{ \"name\": \"other_module\" }]",
+                         "[]");
+    doRelatedLoggersTest("[{ \"name\": \"other_module.somelib\" }]",
+                         "[]");
+    doRelatedLoggersTest("[{ \"name\": \"bind10_other\" }]",
+                         "[]");
+    doRelatedLoggersTest("[{ \"name\": \"bind10_other.somelib\" }]",
+                         "[]");
+    doRelatedLoggersTest("[ { \"name\": \"other_module\" },"
+                         "  { \"name\": \"bind10\" }]",
+                         "[ { \"name\": \"bind10\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"bind10\" }]",
+                         "[ { \"name\": \"bind10\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"bind10.somelib\" }]",
+                         "[ { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+                         "  { \"name\": \"bind10.somelib\" }]",
+                         "[ { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+                         "  { \"name\": \"bind10\" },"
+                         "  { \"name\": \"bind10.somelib\" }]",
+                         "[ { \"name\": \"bind10\" },"
+                         "  { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"*\" }]",
+                         "[ { \"name\": \"bind10\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"*.somelib\" }]",
+                         "[ { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+                         "  { \"name\": \"bind10\", \"severity\": \"WARN\"}]",
+                         "[ { \"name\": \"bind10\", \"severity\": \"WARN\"} ]");
+    doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+                         "  { \"name\": \"some_module\", \"severity\": \"WARN\"}]",
+                         "[ { \"name\": \"bind10\", \"severity\": \"DEBUG\"} ]");
+
+    // make sure 'bad' things like '*foo.x' or '*lib' are ignored
+    // (cfgmgr should have already caught it in the logconfig plugin
+    // check, and is responsible for reporting the error)
+    doRelatedLoggersTest("[ { \"name\": \"*foo\" }]",
+                         "[ ]");
+    doRelatedLoggersTest("[ { \"name\": \"*foo.bar\" }]",
+                         "[ ]");
+    doRelatedLoggersTest("[ { \"name\": \"*foo\" },"
+                         "  { \"name\": \"*foo.lib\" },"
+                         "  { \"name\": \"bind10\" } ]",
+                         "[ { \"name\": \"bind10\" } ]");
+}
+
 }
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index e028186..457d5b0 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,7 +7,7 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-CLEANFILES = *.gcno *.gcda messagedef.h messagedef.cc
+CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
 
 lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
@@ -21,15 +21,15 @@ libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
 libdatasrc_la_SOURCES += zone.h
 libdatasrc_la_SOURCES += result.h
 libdatasrc_la_SOURCES += logger.h logger.cc
-nodist_libdatasrc_la_SOURCES = messagedef.h messagedef.cc
+nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
 libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 
-BUILT_SOURCES = messagedef.h messagedef.cc
-messagedef.h messagedef.cc: Makefile messagedef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/messagedef.mes
+BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 
-EXTRA_DIST = messagedef.mes
+EXTRA_DIST = datasrc_messages.mes
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
new file mode 100644
index 0000000..c692364
--- /dev/null
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -0,0 +1,493 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::datasrc
+
+# \brief Messages for the data source library
+
+% DATASRC_CACHE_CREATE creating the hotspot cache
+Debug information that the hotspot cache was created at startup.
+
+% DATASRC_CACHE_DESTROY destroying the hotspot cache
+Debug information. The hotspot cache is being destroyed.
+
+% DATASRC_CACHE_DISABLE disabling the cache
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+
+% DATASRC_CACHE_ENABLE enabling the cache
+The hotspot cache is enabled from now on.
+
+% DATASRC_CACHE_EXPIRED the item '%1' is expired
+Debug information. An item was looked up in the hotspot cache and was
+actually there, but it was too old, so it was removed instead and nothing
+is reported (the external behaviour is the same as with CACHE_NOT_FOUND).
+
+% DATASRC_CACHE_FOUND the item '%1' was found
+Debug information. An item was successfully looked up in the hotspot cache.
+
+% DATASRC_CACHE_FULL cache is full, dropping oldest
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_INSERT inserting item '%1' into the cache
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+
+% DATASRC_CACHE_NOT_FOUND the item '%1' was not found
+Debug information. An attempt was made to look up an item in the hotspot
+cache, but it is not there.
+
+% DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_REMOVE removing '%1' from the cache
+Debug information. An item is being removed from the hotspot cache.
+
+% DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. The size of 0
+means no limit.
+
+% DATASRC_DO_QUERY handling query for '%1/%2'
+Debug information. We're processing some internal query for given name and
+type.
+
+% DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
+Debug information. An RRset is being added to the in-memory data source.
+
+% DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'
+Debug information. Some special marks above each * in wildcard name are needed.
+They are being added now for this name.
+
+% DATASRC_MEM_ADD_ZONE adding zone '%1/%2'
+Debug information. A zone is being added into the in-memory data source.
+
+% DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+
+% DATASRC_MEM_CNAME CNAME at the domain '%1'
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+
+% DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to CNAME.
+
+% DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+
+% DATASRC_MEM_CREATE creating zone '%1' in '%2' class
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+
+% DATASRC_MEM_DELEG_FOUND delegation found at '%1'
+Debug information. A delegation point was found above the requested record.
+
+% DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class
+Debug information. A zone from in-memory data source is being destroyed.
+
+% DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain and
+stop the search.
+
+% DATASRC_MEM_DNAME_FOUND DNAME found at '%1'
+Debug information. A DNAME was found instead of the requested information.
+
+% DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
+A request was made to put DNAME and NS records into the same domain,
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3, and indicates a problem with the provided data.
+
+% DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+
+% DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'
+An RRset is being inserted into in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+
+% DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+
+% DATASRC_MEM_FIND find '%1/%2'
+Debug information. A search for the requested RRset is being started.
+
+% DATASRC_MEM_FIND_ZONE looking for zone '%1'
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+
+% DATASRC_MEM_LOAD loading zone '%1' from file '%2'
+Debug information. The content of master file is being loaded into the memory.
+
+% DATASRC_MEM_NOTFOUND requested domain '%1' not found
+Debug information. The requested domain does not exist.
+
+% DATASRC_MEM_NS_ENCOUNTERED encountered a NS
+Debug information. While searching for the requested domain, an NS was
+encountered on the way (a delegation). This may cause the search to stop.
+
+% DATASRC_MEM_NXRRSET no such type '%1' at '%2'
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+
+% DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
+An attempt was made to add the domain into a zone that shouldn't contain it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+
+% DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard). So it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+
+% DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+
+% DATASRC_MEM_SUCCESS query for '%1/%2' successful
+Debug information. The requested record was found.
+
+% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested
+record type).
+
+% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for doing the manipulation in an exception-safe
+manner: the new data are prepared in a separate zone object and, once that
+succeeds, the contents are swapped. The original object then holds the new
+data and the scratch object, now holding the old data, can be safely destroyed.
+
+% DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
+Debug information. A domain above wildcard was reached, but there's something
+below the requested domain. Therefore the wildcard doesn't apply here.  This
+behaviour is specified by RFC 1034, section 4.3.3
+
+% DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_META_ADD adding a data source into meta data source
+Debug information. Yet another data source is being added into the meta data
+source. (probably at startup or reconfiguration)
+
+% DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
+An attempt was made to add a data source to a meta data source, but their
+classes do not match.
+
+% DATASRC_META_REMOVE removing data source from meta data source
+Debug information. A data source is being removed from the meta data source.
+
+% DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'
+Debug information. An NSEC record covering this zone is being added.
+
+% DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
+Debug information. An NSEC3 record for the given zone is being added to the
+response message.
+
+% DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message
+Debug information. An RRset is being added to the response message.
+
+% DATASRC_QUERY_ADD_SOA adding SOA of '%1'
+Debug information. An SOA record of the given zone is being added to the
+authority section of the response message.
+
+% DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the authoritative query. The
+failure code 1 means a general error and 2 means the operation is not
+implemented. The data source should have logged the specific error already.
+
+% DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'
+The domain lives in another zone, but it is not possible to generate referral
+information for it.
+
+% DATASRC_QUERY_CACHED data for %1/%2 found in cache
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+
+% DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'
+Debug information. While processing a query, a lookup in the hotspot cache is
+being made.
+
+% DATASRC_QUERY_COPY_AUTH copying authoritative section into message
+Debug information. The whole referral information is being copied into the
+response message.
+
+% DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+
+% DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty
+A CNAME was being followed, but it contains no records, so there's nowhere to
+go and there will be no answer. This indicates a problem with the supplied
+data.
+
+% DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty
+During an attempt to synthesize a CNAME from this DNAME, it was discovered
+that the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+
+% DATASRC_QUERY_FAIL query failed
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+
+% DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+
+% DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned domain name, so A/AAAA records for that name are
+looked up and put into the additional section.
+
+% DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned domain name, so A/AAAA records for that name are
+looked up and put into the additional section.
+
+% DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the glue query. The failure code
+1 means a general error and 2 means the operation is not implemented. The
+data source should have logged the specific error already.
+
+% DATASRC_QUERY_INVALID_OP invalid query operation requested
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+
+% DATASRC_QUERY_IS_AUTH auth query (%1/%2)
+Debug information. The last DO_QUERY is an auth query.
+
+% DATASRC_QUERY_IS_GLUE glue query (%1/%2)
+Debug information. The last DO_QUERY is a query for glue addresses.
+
+% DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+
+% DATASRC_QUERY_IS_REF query for referral (%1/%2)
+Debug information. The last DO_QUERY is a query for referral information.
+
+% DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)
+Debug information. The last DO_QUERY is a simple query.
+
+% DATASRC_QUERY_MISPLACED_TASK task of this type should not be here
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+
+% DATASRC_QUERY_MISSING_NS missing NS records for '%1'
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA
+The answer should have been a negative one (e.g. reporting the nonexistence of
+something). To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided
+data.
+
+% DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the no-glue query. The failure
+code 1 means a general error and 2 means the operation is not implemented.
+The data source should have logged the specific error already.
+
+% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+
+% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+
+% DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
+An attempt to add an NSEC record to the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
+An attempt to add an NSEC3 record to the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'
+Lookup of the domain failed because the data have no zone that contains the
+domain. Maybe someone sent a query to the wrong server for some reason.
+
+% DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class
+Debug information. An actual query (as opposed to an internal lookup) is being
+processed now.
+
+% DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
+The user wants DNSSEC and we discovered that the entity doesn't exist (either
+the domain or the record), but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+
+% DATASRC_QUERY_REF_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the query for referral
+information. The failure code 1 means a general error and 2 means the
+operation is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_RRSIG unable to answer RRSIG query
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+
+% DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the simple query. The failure
+code 1 means a general error and 2 means the operation is not implemented.
+The data source should have logged the specific error already.
+
+% DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
+Debug information. While answering a query, a DNAME was encountered. The DNAME
+itself will be returned, along with a synthesized CNAME for clients that don't
+understand DNAMEs.
+
+% DATASRC_QUERY_TASK_FAIL task failed with %1
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
+A CNAME led to another CNAME, which led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
+
+% DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask
+This indicates a programmer error. The answer from a subtask doesn't look
+like anything known.
+
+% DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+
+% DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'
+During an attempt to cover the domain with a wildcard, an error happened. The
+exact cause should have been reported already.
+
+% DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+
+% DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
+While processing a wildcard, a referral was encountered, but it wasn't
+possible to get enough information for it. The code is 1 for error, 2 for not
+implemented.
+
+% DATASRC_SQLITE_CLOSE closing SQLite database
+Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CREATE SQLite data source created
+Debug information. An instance of SQLite data source is being created.
+
+% DATASRC_SQLITE_DESTROY SQLite data source destroyed
+Debug information. An instance of SQLite data source is being destroyed.
+
+% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+
+% DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+
+% DATASRC_SQLITE_FIND looking for RRset '%1/%2'
+Debug information. The SQLite data source is looking up a resource record
+set.
+
+% DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
+Debug information. The data source is looking up the addresses for the given
+domain name.
+
+% DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the one the query was for.
+
+% DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'
+Debug information. The SQLite data source is looking up an exact resource
+record.
+
+% DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the one the query was for.
+
+% DATASRC_SQLITE_FINDREC looking for record '%1/%2'
+Debug information. The SQLite data source is looking up records of the given
+name and type in the database.
+
+% DATASRC_SQLITE_FINDREF looking for referral at '%1'
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+
+% DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
+The SQLite data source was trying to identify whether there's a referral, but
+it contains a different class than the one the query was for.
+
+% DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the one the query was for.
+
+% DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
+Debug information. We're trying to look up an NSEC3 record in the SQLite data
+source.
+
+% DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+
+% DATASRC_SQLITE_OPEN opening SQLite database '%1'
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+
+% DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'
+Debug information. We're trying to look up the name preceding the supplied
+one.
+
+% DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
+The SQLite data source tried to identify the name preceding this one, but this
+name is not contained in any zone in the data source.
+
+% DATASRC_SQLITE_SETUP setting up SQLite database
+The database for the SQLite data source was found empty. It is assumed this
+is the first run and it is being initialized with the current schema. It will
+still contain no data, but it will be ready for use.
+
+% DATASRC_STATIC_BAD_CLASS static data source can handle CH only
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+
+% DATASRC_STATIC_CREATE creating the static datasource
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+
+% DATASRC_STATIC_FIND looking for '%1/%2'
+Debug information. This resource record set is being looked up in the static
+data source.
+
+% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
+This indicates a programming error. An internal task of unknown type was
+generated.
+
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index 7c2828d..ac5d50b 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -16,7 +16,7 @@
 #define __DATASRC_LOGGER_H
 
 #include <log/macros.h>
-#include <datasrc/messagedef.h>
+#include <datasrc/datasrc_messages.h>
 
 /// \file logger.h
 /// \brief Data Source library global logger
diff --git a/src/lib/datasrc/messagedef.mes b/src/lib/datasrc/messagedef.mes
deleted file mode 100644
index dedd2ad..0000000
--- a/src/lib/datasrc/messagedef.mes
+++ /dev/null
@@ -1,494 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX DATASRC_
-$NAMESPACE isc::datasrc
-
-# \brief Messages for the data source library
-
-% CACHE_CREATE creating the hotspot cache
-Debug information that the hotspot cache was created at startup.
-
-% CACHE_DESTROY destroying the hotspot cache
-Debug information. The hotspot cache is being destroyed.
-
-% CACHE_INSERT inserting item '%1' into the cache
-Debug information. It means a new item is being inserted into the hotspot
-cache.
-
-% CACHE_OLD_FOUND older instance of cache item found, replacing
-Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_FULL cache is full, dropping oldest
-Debug information. After inserting an item into the hotspot cache, the
-maximum number of items was exceeded, so the least recently used item will
-be dropped. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_REMOVE removing '%1' from the cache
-Debug information. An item is being removed from the hotspot cache.
-
-% CACHE_NOT_FOUND the item '%1' was not found
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-
-% CACHE_FOUND the item '%1' was found
-Debug information. An item was successfully looked up in the hotspot cache.
-
-% CACHE_EXPIRED the item '%1' is expired
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
-
-% CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
-The maximum allowed number of items of the hotspot cache is set to the given
-number. If there are too many, some of them will be dropped. The size of 0
-means no limit.
-
-% CACHE_ENABLE enabling the cache
-The hotspot cache is enabled from now on.
-
-% CACHE_DISABLE disabling the cache
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-
-% QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
-
-% QUERY_EMPTY_DNAME the DNAME on '%1' is empty
-During an attempt to synthesize CNAME from this DNAME it was discovered the
-DNAME is empty (it has no records). This indicates problem with supplied data.
-
-% QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
-Debug information. While processing a query, a NS record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
-Debug information. While processing a query, a MX record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_FOLLOW_CNAME following CNAME at '%1'
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
-
-% QUERY_EMPTY_CNAME CNAME at '%1' is empty
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
-
-% QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
-A CNAME led to another CNAME and it led to another, and so on. After 16
-CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
-might possibly be a loop as well. Note that some of the CNAMEs might have
-been synthesized from DNAMEs. This indicates problem with supplied data.
-
-% QUERY_CHECK_CACHE checking cache for '%1/%2'
-Debug information. While processing a query, lookup to the hotspot cache
-is being made.
-
-% QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for ANY queries for consistency
-reasons.
-
-% QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for authoritative ANY queries
-for consistency reasons.
-
-% DO_QUERY handling query for '%1/%2'
-Debug information. We're processing some internal query for given name and
-type.
-
-% QUERY_NO_ZONE no zone containing '%1' in class '%2'
-Lookup of domain failed because the data have no zone that contain the
-domain. Maybe someone sent a query to the wrong server for some reason.
-
-% QUERY_CACHED data for %1/%2 found in cache
-Debug information. The requested data were found in the hotspot cache, so
-no query is sent to the real data source.
-
-% QUERY_IS_SIMPLE simple query (%1/%2)
-Debug information. The last DO_QUERY is a simple query.
-
-% QUERY_IS_AUTH auth query (%1/%2)
-Debug information. The last DO_QUERY is an auth query.
-
-% QUERY_IS_GLUE glue query (%1/%2)
-Debug information. The last DO_QUERY is query for glue addresses.
-
-% QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
-Debug information. The last DO_QUERY is query for addresses that are not
-glue.
-
-% QUERY_IS_REF query for referral (%1/%2)
-Debug information. The last DO_QUERY is query for referral information.
-
-% QUERY_SIMPLE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the simple query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_AUTH_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the authoritative query. 1 means
-some error, 2 is not implemented. The data source should have logged the
-specific error already.
-
-% QUERY_GLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the glue query. 1 means some error,
-2 is not implemented. The data source should have logged the specific error
-already.
-
-% QUERY_NOGLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the no-glue query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_REF_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the query for referral information.
-1 means some error, 2 is not implemented. The data source should have logged
-the specific error already.
-
-% QUERY_INVALID_OP invalid query operation requested
-This indicates a programmer error. The DO_QUERY was called with unknown
-operation code.
-
-% QUERY_ADD_RRSET adding RRset '%1/%2' to message
-Debug information. An RRset is being added to the response message.
-
-% QUERY_COPY_AUTH copying authoritative section into message
-Debug information. The whole referral information is being copied into the
-response message.
-
-% QUERY_DELEGATION looking for delegation on the path to '%1'
-Debug information. The software is trying to identify delegation points on the
-way down to the given domain.
-
-% QUERY_ADD_SOA adding SOA of '%1'
-Debug information. A SOA record of the given zone is being added to the
-authority section of the response message.
-
-% QUERY_ADD_NSEC adding NSEC record for '%1'
-Debug information. A NSEC record covering this zone is being added.
-
-% QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
-Debug information. A NSEC3 record for the given zone is being added to the
-response message.
-
-% QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
-An attempt to add a NSEC3 record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
-An attempt to add a NSEC record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_WILDCARD looking for a wildcard covering '%1'
-Debug information. A direct match wasn't found, so a wildcard covering the
-domain is being looked for now.
-
-% QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
-While processing a wildcard, it wasn't possible to prove nonexistence of the
-given domain or record.  The code is 1 for error and 2 for not implemented.
-
-% QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
-While processing a wildcard, a referral was met. But it wasn't possible to get
-enough information for it.  The code is 1 for error, 2 for not implemented.
-
-% QUERY_PROCESS processing query '%1/%2' in the '%3' class
-Debug information. A sure query is being processed now.
-
-% QUERY_RRSIG unable to answer RRSIG query
-The server is unable to answer a direct query for RRSIG type, but was asked
-to do so.
-
-% QUERY_MISPLACED_TASK task of this type should not be here
-This indicates a programming error. A task was found in the internal task
-queue, but this kind of task wasn't designed to be inside the queue (it should
-be handled right away, not queued).
-
-% QUERY_TASK_FAIL task failed with %1
-The query subtask failed. The reason should have been reported by the subtask
-already. The code is 1 for error, 2 for not implemented.
-
-% QUERY_MISSING_NS missing NS records for '%1'
-NS records should have been put into the authority section. However, this zone
-has none. This indicates problem with provided data.
-
-% UNEXPECTED_QUERY_STATE unexpected query state
-This indicates a programming error. An internal task of unknown type was
-generated.
-
-% QUERY_FAIL query failed
-Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
-
-% QUERY_BAD_REFERRAL bad referral to '%1'
-The domain lives in another zone. But it is not possible to generate referral
-information for it.
-
-% QUERY_WILDCARD_FAIL error processing wildcard for '%1'
-During an attempt to cover the domain by a wildcard an error happened. The
-exact kind was hopefully already reported.
-
-% QUERY_MISSING_SOA the zone '%1' has no SOA
-The answer should have been a negative one (eg. of nonexistence of something).
-To do so, a SOA record should be put into the authority section, but the zone
-does not have one. This indicates problem with provided data.
-
-% QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
-The user wants DNSSEC and we discovered the entity doesn't exist (either
-domain or the record). But there was an error getting NSEC/NSEC3 record
-to prove the nonexistence.
-
-% QUERY_UNKNOWN_RESULT unknown result of subtask
-This indicates a programmer error. The answer of subtask doesn't look like
-anything known.
-
-% META_ADD adding a data source into meta data source
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
-
-% META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
-It was attempted to add a data source into a meta data source. But their
-classes do not match.
-
-% META_REMOVE removing data source from meta data source
-Debug information. A data source is being removed from meta data source.
-
-% MEM_ADD_WILDCARD adding wildcards for '%1'
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
-
-% MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
-Someone or something tried to add a CNAME into a domain that already contains
-some other data. But the protocol forbids coexistence of CNAME with anything
-(RFC 1034, section 3.6.2). This indicates a problem with provided data.
-
-% MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
-This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some other data to CNAME.
-
-% MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
-
-% MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
-Some resource types are singletons -- only one is allowed in a domain
-(for example CNAME or SOA). This indicates a problem with provided data.
-
-% MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
-It was attempted to add the domain into a zone that shouldn't have it
-(eg. the domain is not subdomain of the zone origin). This indicates a
-problem with provided data.
-
-% MEM_WILDCARD_NS NS record in wildcard domain '%1'
-The software refuses to load NS records into a wildcard domain.  It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
-The software refuses to load DNAME records into a wildcard domain.  It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
-Debug information. An RRset is being added to the in-memory data source.
-
-% MEM_DUP_RRSET duplicate RRset '%1/%2'
-An RRset is being inserted into in-memory data source for a second time.  The
-original version must be removed first. Note that loading master files where an
-RRset is split into multiple locations is not supported yet.
-
-% MEM_DNAME_ENCOUNTERED encountered a DNAME
-Debug information. While searching for the requested domain, a DNAME was
-encountered on the way.  This may lead to redirection to a different domain and
-stop the search.
-
-% MEM_NS_ENCOUNTERED encountered a NS
-Debug information. While searching for the requested domain, a NS was
-encountered on the way (a delegation). This may lead to stop of the search.
-
-% MEM_RENAME renaming RRset from '%1' to '%2'
-Debug information. A RRset is being generated from a different RRset (most
-probably a wildcard). So it must be renamed to whatever the user asked for. In
-fact, it's impossible to rename RRsets with our libraries, so a new one is
-created and all resource records are copied over.
-
-% MEM_FIND find '%1/%2'
-Debug information. A search for the requested RRset is being started.
-
-% MEM_DNAME_FOUND DNAME found at '%1'
-Debug information. A DNAME was found instead of the requested information.
-
-% MEM_DELEG_FOUND delegation found at '%1'
-Debug information. A delegation point was found above the requested record.
-
-% MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
-Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated  as NXRRSET
-case (eg. the domain exists, but it doesn't have the requested record type).
-
-% MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
-Debug information. A domain above wildcard was reached, but there's something
-below the requested domain. Therefore the wildcard doesn't apply here.  This
-behaviour is specified by RFC 1034, section 4.3.3
-
-% MEM_NOTFOUND requested domain '%1' not found
-Debug information. The requested domain does not exist.
-
-% MEM_DOMAIN_EMPTY requested domain '%1' is empty
-Debug information. The requested domain exists in the tree of domains, but
-it is empty. Therefore it doesn't contain the requested resource type.
-
-% MEM_EXACT_DELEGATION delegation at the exact domain '%1'
-Debug information. There's a NS record at the requested domain. This means
-this zone is not authoritative for the requested domain, but a delegation
-should be followed. The requested domain is an apex of some zone.
-
-% MEM_ANY_SUCCESS ANY query for '%1' successful
-Debug information. The domain was found and an ANY type query is being answered
-by providing everything found inside the domain.
-
-% MEM_SUCCESS query for '%1/%2' successful
-Debug information. The requested record was found.
-
-% MEM_CNAME CNAME at the domain '%1'
-Debug information. The requested domain is an alias to a different domain,
-returning the CNAME instead.
-
-% MEM_NXRRSET no such type '%1' at '%2'
-Debug information. The domain exists, but it doesn't hold any record of the
-requested type.
-
-% MEM_CREATE creating zone '%1' in '%2' class
-Debug information. A representation of a zone for the in-memory data source is
-being created.
-
-% MEM_DESTROY destroying zone '%1' in '%2' class
-Debug information. A zone from in-memory data source is being destroyed.
-
-% MEM_LOAD loading zone '%1' from file '%2'
-Debug information. The content of master file is being loaded into the memory.
-
-% MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
-Debug information. The contents of two in-memory zones are being exchanged.
-This is usual practice to do some manipulation in exception-safe manner -- the
-new data are prepared in a different zone object and when it works, they are
-swapped. The old one contains the new data and the other one can be safely
-destroyed.
-
-% MEM_ADD_ZONE adding zone '%1/%2'
-Debug information. A zone is being added into the in-memory data source.
-
-% MEM_FIND_ZONE looking for zone '%1'
-Debug information. A zone object for this zone is being searched for in the
-in-memory data source.
-
-% STATIC_CREATE creating the static datasource
-Debug information. The static data source (the one holding stuff like
-version.bind) is being created.
-
-% STATIC_BAD_CLASS static data source can handle CH only
-For some reason, someone asked the static data source a query that is not in
-the CH class.
-
-% STATIC_FIND looking for '%1/%2'
-Debug information. This resource record set is being looked up in the static
-data source.
-
-% SQLITE_FINDREC looking for record '%1/%2'
-Debug information. The SQLite data source is looking up records of given name
-and type in the database.
-
-% SQLITE_ENCLOSURE looking for zone containing '%1'
-Debug information. The SQLite data source is trying to identify which zone
-should hold this domain.
-
-% SQLITE_ENCLOSURE_NOTFOUND no zone contains it
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
-no such zone in our data.
-
-% SQLITE_PREVIOUS looking for name previous to '%1'
-Debug information. We're trying to look up name preceding the supplied one.
-
-% SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
-
-% SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
-Debug information. We're trying to look up a NSEC3 record in the SQLite data
-source.
-
-% SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
-The SQLite data source was asked to provide a NSEC3 record for given zone.
-But it doesn't contain that zone.
-
-% SQLITE_FIND looking for RRset '%1/%2'
-Debug information. The SQLite data source is looking up a resource record
-set.
-
-% SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an RRset, but the data source contains
-different class than the query was for.
-
-% SQLITE_FINDEXACT looking for exact RRset '%1/%2'
-Debug information. The SQLite data source is looking up an exact resource
-record.
-
-% SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an exact RRset, but the data source
-contains different class than the query was for.
-
-% SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
-Debug information. The data source is looking up the addresses for given
-domain name.
-
-% SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
-The SQLite data source was looking up A/AAAA addresses, but the data source
-contains different class than the query was for.
-
-% SQLITE_FINDREF looking for referral at '%1'
-Debug information. The SQLite data source is identifying if this domain is
-a referral and where it goes.
-
-% SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
-The SQLite data source was trying to identify if there's a referral. But
-it contains different class than the query was for.
-
-% SQLITE_CREATE SQLite data source created
-Debug information. An instance of SQLite data source is being created.
-
-% SQLITE_DESTROY SQLite data source destroyed
-Debug information. An instance of SQLite data source is being destroyed.
-
-% SQLITE_SETUP setting up SQLite database
-The database for SQLite data source was found empty. It is assumed this is the
-first run and it is being initialized with current schema.  It'll still contain
-no data, but it will be ready for use.
-
-% SQLITE_OPEN opening SQLite database '%1'
-Debug information. The SQLite data source is loading an SQLite database in
-the provided file.
-
-% SQLITE_CLOSE closing SQLite database
-Debug information. The SQLite data source is closing the database file.
diff --git a/tools/system_messages.py b/tools/system_messages.py
new file mode 100644
index 0000000..6cf3ce9
--- /dev/null
+++ b/tools/system_messages.py
@@ -0,0 +1,413 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Produce System Messages Manual
+#
+# This tool reads all the .mes files in the directory tree whose root is given
+# on the command line and interprets them as BIND 10 message files.  It pulls
+# all the messages and descriptions out, sorts them by message ID, and writes
+# them out as a single (formatted) file.
+#
+# Invocation:
+# The code is invoked using the command line:
+#
+# python system_messages.py [-o <output-file>] <top-source-directory>
+#
+# If no output file is specified, output is written to stdout.
+
+import re
+import os
+import sys
+from optparse import OptionParser
+
+# Main dictionary holding all the messages.  The messages are accumulated here
+# before being printed in alphabetical order.
+dictionary = {}
+
+# The structure of the output page is:
+#
+#        header
+#           message
+#        separator
+#           message
+#        separator
+#          :
+#        separator
+#           message
+#        trailer
+#
+# (Indentation is not relevant - it has only been added to the above
+# illustration to make the structure clearer.)  The text of these sections is:
+
+# Header - this is output before anything else.
+SEC_HEADER="""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash  "&#x2014;" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<book>
+  <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+  <bookinfo>
+    <title>BIND 10 Messages Manual</title>
+
+    <copyright>
+      <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+    </copyright>
+
+    <abstract>
+      <para>BIND 10 is a Domain Name System (DNS) suite managed by
+	  Internet Systems Consortium (ISC). It includes DNS libraries
+	  and modular components for controlling authoritative and
+	  recursive DNS servers.
+      </para>
+      <para>
+        This is the messages manual for BIND 10 version &__VERSION__;.
+	    The most up-to-date version of this document, along with
+	    other documents for BIND 10, can be found at
+        <ulink url="http://bind10.isc.org/docs"/>.
+      </para>
+    </abstract>
+
+    <releaseinfo>This is the messages manual for BIND 10 version
+        &__VERSION__;.</releaseinfo>
+  </bookinfo>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+      This document lists each message that can be logged by the
+      programs in the BIND 10 package.  Each entry in this manual
+      is of the form:
+      <screen>IDENTIFICATION message-text</screen>
+      ... where "IDENTIFICATION" is the message identification included
+      in each message logged and "message-text" is the accompanying
+      message text.  The "message-text" may include placeholders of the
+      form "%1", "%2" etc.; these parameters are replaced by relevant
+      values when the message is logged.
+    </para>
+    <para>
+      Each entry is also accompanied by a description giving more
+      information about the circumstances that result in the message
+      being logged.
+    </para>
+    <para>
+      For information on configuring and using BIND 10 logging,
+      refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+    </para>
+  </chapter>
+
+  <chapter id="messages">
+    <title>BIND 10 Messages</title>
+    <para>
+      <variablelist>
+"""
+
+# This is output once for each message.  The string contains substitution
+# tokens: $I is replaced by the message identification, $T by the message text,
+# and $D by the message description.
+SEC_MESSAGE = """<varlistentry id="$I">
+<term>$I $T</term>
+<listitem><para>
+$D
+</para></listitem>
+</varlistentry>"""
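
For illustration only (not part of the patch), an entry such as
DATASRC_MEM_LOAD from the catalog above would expand to roughly the following
DocBook fragment once printMessage() below has substituted the three tokens:

    <varlistentry id="DATASRC_MEM_LOAD">
    <term>DATASRC_MEM_LOAD loading zone '%1' from file '%2'</term>
    <listitem><para>
    Debug information. The content of the master file is being loaded into memory.
    </para></listitem>
    </varlistentry>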
+
+# A description may contain blank lines intended to separate paragraphs.  If so,
+# each blank line is replaced by the following.
+SEC_BLANK = "</para><para>"
+
+# The separator is copied to the output verbatim after each message except
+# the last.
+SEC_SEPARATOR = ""
+
+# The trailer is copied to the output verbatim after the last message.
+SEC_TRAILER = """      </variablelist>
+    </para>
+  </chapter>
+</book>"""
+
+
+def reportError(filename, what):
+    """Report an error and exit"""
+    print("*** ERROR in ", filename, file=sys.stderr)
+    print("*** REASON: ", what, file=sys.stderr)
+    print("*** System message generator terminating", file=sys.stderr)
+    sys.exit(1)
+
+
+
+def replaceTag(string):
+    """Replaces the '<' and '>' in text about to be inserted into the template
+       sections above with &lt; and &gt; to avoid problems with message text
+       being interpreted as XML text.
+    """
+    string1 = string.replace("<", "&lt;")
+    string2 = string1.replace(">", "&gt;")
+    return string2
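
As a quick illustration (not part of the patch), the escaping is expected to
behave like this:

    >>> replaceTag("<term>$I</term>")
    '&lt;term&gt;$I&lt;/term&gt;'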
+
+
+
+def replaceBlankLines(lines):
+    """Replaces blank lines in an array with the contents of the 'blank'
+       section.
+    """
+    result = []
+    for l in lines:
+        if len(l) == 0:
+            result.append(SEC_BLANK)
+        else:
+            result.append(l)
+
+    return result
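
A small illustration (not part of the patch) of how a two-paragraph
description is prepared for the template:

    >>> replaceBlankLines(["First paragraph.", "", "Second paragraph."])
    ['First paragraph.', '</para><para>', 'Second paragraph.']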
+
+
+
+# Printing functions
+def printHeader():
+    print(SEC_HEADER)
+
+def printSeparator():
+    print(SEC_SEPARATOR)
+
+def printMessage(msgid):
+    # In the message ID, replace "<" and ">" with XML-safe versions and
+    # substitute into the data.
+    m1 = SEC_MESSAGE.replace("$I", replaceTag(msgid))
+
+    # Do the same for the message text.
+    m2 = m1.replace("$T", replaceTag(dictionary[msgid]['text']))
+
+    # Do the same for the description then replace blank lines with the
+    # specified separator.  (We do this in that order to avoid replacing
+    # the "<" and ">" in the XML tags in the separator.)
+    desc1 = [replaceTag(l) for l in dictionary[msgid]['description']]
+    desc2 = replaceBlankLines(desc1)
+
+    # Join the lines together to form a single string and insert into
+    # current text.
+    m3 = m2.replace("$D", "\n".join(desc2))
+
+    print(m3)
+
+def printTrailer():
+    print(SEC_TRAILER)
+
+
+
+def removeEmptyLeadingTrailing(lines):
+    """Removes leading and trailing empty lines.
+
+       A list of strings is passed as argument, some of which may be empty.
+       This function removes a contiguous sequence of empty lines from the
+       start and end of the list and returns the result.  Embedded sequences
+       of empty lines are not touched.
+
+       Parameters:
+       lines List of strings to be modified.
+
+       Return:
+       Input list of strings with leading/trailing blank line sequences
+       removed.
+    """
+
+    retlines = []
+
+    # Dispose of degenerate case of empty array
+    if len(lines) == 0:
+        return retlines
+
+    # Search for first non-blank line
+    start = 0
+    while start < len(lines):
+        if len(lines[start]) > 0:
+            break
+        start = start + 1
+
+    # Handle case when entire list is empty
+    if start >= len(lines):
+        return retlines
+
+    # Search for last non-blank line
+    finish = len(lines) - 1
+    while finish >= 0:
+        if len(lines[finish]) > 0:
+            break
+        finish = finish - 1
+
+    retlines = lines[start:finish + 1]
+    return retlines
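
For example (illustrative, not part of the patch), leading and trailing blank
lines are dropped while the embedded one is kept:

    >>> removeEmptyLeadingTrailing(["", "", "para one", "", "para two", ""])
    ['para one', '', 'para two']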
+
+
+
+def addToDictionary(msgid, msgtext, desc, filename):
+    """Add the current message ID and associated information to the global
+       dictionary.  If a message with that ID already exists, loop appending
+       suffixes of the form "(n)" to it until one is found that doesn't.
+
+       Parameters:
+       msgid        Message ID
+       msgtext      Message text
+       desc         Message description
+       filename     File from which the message came.  Currently this is
+                    not used, but a future enhancement may wish to include the
+                    name of the message file in the messages manual.
+    """
+
+    # If the ID is in the dictionary, append a "(n)" to the name - this will
+    # flag that there are multiple instances.  (However, this is an error -
+    # each ID should be unique in BIND-10.)
+    if msgid in dictionary:
+        i = 1
+        while msgid + " (" + str(i) + ")" in dictionary:
+            i = i + 1
+        msgid = msgid + " (" + str(i) + ")"
+
+    # Remove leading and trailing blank lines in the description, then
+    # add everything into a subdictionary which is then added to the main
+    # one.
+    details = {}
+    details['text'] = msgtext
+    details['description'] = removeEmptyLeadingTrailing(desc)
+    details['filename'] = filename
+    dictionary[msgid] = details
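
A minimal sketch (not part of the patch) of the duplicate-ID handling,
assuming an initially empty dictionary; the message is taken from the catalog
above and the file names are purely illustrative:

    >>> addToDictionary("DATASRC_QUERY_FAIL", "query failed",
    ...                 ["Some subtask of query processing failed."], "a.mes")
    >>> addToDictionary("DATASRC_QUERY_FAIL", "query failed",
    ...                 ["Some subtask of query processing failed."], "b.mes")
    >>> sorted(dictionary)
    ['DATASRC_QUERY_FAIL', 'DATASRC_QUERY_FAIL (1)']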
+
+
+
+def processFileContent(filename, lines):
+    """Processes file content.  Messages and descriptions are identified and
+       added to a dictionary (keyed by message ID).  If the key already exists,
+       a numeric suffix is added to it.
+
+       Parameters:
+       filename     Name of the message file being processed
+       lines        Lines read from the file
+    """
+
+    prefix = ""         # Last prefix encountered
+    msgid = ""          # Last message ID encountered
+    msgtext = ""        # Text of the message
+    description = []    # Description
+
+    for l in lines:
+        if l.startswith("$"):
+            # Starts with "$".  Ignore anything other than $PREFIX
+            words = re.split("\s+", l)
+            if words[0].upper() == "$PREFIX":
+                if len(words) == 1:
+                    prefix = ""
+                else:
+                    prefix = words[1]
+
+        elif l.startswith("%"):
+            # Start of a message.  Add the message we were processing to the
+            # dictionary and clear everything apart from the file name.
+            if msgid != "":
+                addToDictionary(msgid, msgtext, description, filename)
+
+            msgid = ""
+            msgtext = ""
+            description = []
+
+            # Start of a message
+            l = l[1:].strip()       # Remove "%" and trim leading spaces
+            if len(l) == 0:
+                reportError(filename, "Line with single % found")
+                continue
+
+            # Split into words.  The first word is the message ID
+            words = re.split("\s+", l)
+            msgid = (prefix + words[0]).upper()
+            msgtext = l[len(words[0]):].strip()
+
+        else:
+            # Part of a description, so add to the current description array
+            description.append(l)
+
+    # All done, add the last message to the global dictionary.
+    if msgid != "":
+        addToDictionary(msgid, msgtext, description, filename)
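
A compact end-to-end sketch (not part of the patch) of how a tiny message
file would be interpreted, modelled on the catalog entries above; the file
name is illustrative:

    >>> dictionary.clear()
    >>> processFileContent("example.mes", [
    ...     "$PREFIX DATASRC_",
    ...     "% MEM_LOAD loading zone '%1' from file '%2'",
    ...     "Debug information. The content of the master file is being",
    ...     "loaded into memory."])
    >>> dictionary["DATASRC_MEM_LOAD"]["text"]
    "loading zone '%1' from file '%2'"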
+
+
+
+def processFile(filename):
+    """Processes a file by reading it in and stripping out all comments and
+       directives.  Leading and trailing blank lines in the file are removed
+       and the remainder passed for message processing.
+
+       Parameters:
+       filename     Name of the message file to process
+    """
+    lines = open(filename).readlines()
+
+    # Trim leading and trailing spaces from each line, and remove comments.
+    lines = [l.strip() for l in lines]
+    lines = [l for l in lines if not l.startswith("#")]
+
+    # Remove leading/trailing empty line sequences from the result
+    lines = removeEmptyLeadingTrailing(lines)
+
+    # Interpret content
+    processFileContent(filename, lines)
+
+
+
+def processAllFiles(root):
+    """Iterates through all files in the tree starting at the given root and
+       calls processFile for all .mes files found.
+
+       Parameters:
+       root     Directory that is the root of the BIND-10 source tree
+    """
+    for (path, dirs, files) in os.walk(root):
+
+        # Identify message files
+        mes_files = [f for f in files if f.endswith(".mes")]
+
+        # ... and process each file in the list
+        for m in mes_files:
+            processFile(path + os.sep + m)
+
+
+# Main program
+if __name__ == "__main__":
+    parser = OptionParser(usage="Usage: %prog [--help | options] root")
+    parser.add_option("-o", "--output", dest="output", default=None,
+                      metavar="FILE", 
+                      help="output file name (default to stdout)")
+    (options, args) = parser.parse_args()
+
+    if len(args) == 0:
+        parser.error("Must supply directory at which to begin search")
+    elif len(args) > 1:
+        parser.error("Only a single root directory can be given")
+
+    # Redirect output if specified (errors are written to stderr)
+    if options.output is not None:
+        sys.stdout = open(options.output, 'w')
+
+    # Read the files and load the data
+    processAllFiles(args[0])
+
+    # Now just print out everything we've read (in alphabetical order).
+    count = 1
+    printHeader()
+    for msgid in sorted(dictionary):
+        if count > 1:
+            printSeparator()
+        count = count + 1
+        printMessage(msgid)
+    printTrailer()
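
For reference (not part of the patch), a typical invocation from the top of a
BIND 10 source tree might look like the following; the output file name is
purely illustrative:

    python tools/system_messages.py -o bind10-messages.xml .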




More information about the bind10-changes mailing list