BIND 10 trac1001, updated. cac876d0c8ab31aa9007411aacb5ef6ecda398a0 Merge branch 'master' into trac1001

BIND 10 source code commits bind10-changes at lists.isc.org
Tue Jun 28 02:32:47 UTC 2011


The branch, trac1001 has been updated
       via  cac876d0c8ab31aa9007411aacb5ef6ecda398a0 (commit)
       via  f9245031dcdecba55204916535555ea20374878a (commit)
       via  f9b5323ae8c8ffd7d4d2b69c360dc497b935d6de (commit)
       via  8ec67a677e0ee2ecab48d112c3c5f5a5c5753543 (commit)
       via  95707bad7adcc6963baebc1b7e3b005d1b8e316b (commit)
       via  373bed0a95cbe38e67282e7ccca8cdd8fc2372f0 (commit)
       via  a01f7fc667d5fe05428231479d8e934673b40407 (commit)
       via  5355f8f14648fddf13cda7240530e7b4216da671 (commit)
       via  dcdabc780fce8d02c9263f8e98f03b29bb4e5210 (commit)
       via  0fad7d4a8557741f953eda9fed1d351a3d9dc5ef (commit)
       via  3e861eb6aec036b3c5a2f6a71c6ff3adbdc9a55a (commit)
       via  502100d7b9cd9d2300e78826a3bddd024ef38a74 (commit)
       via  b477df5d4dbce5b72ebd183b83555f62aa3fcec5 (commit)
       via  701c0d6d7c484c2f46951d23fba47c760363b7e4 (commit)
       via  5d5173ef0cc48d206464b39f696d03bae9daecea (commit)
       via  7cc074aacd5159778111fa4cbdbe1c89e6a4e51b (commit)
       via  dc087934c1a1946cfdcf63b49a70aa0fefe6b282 (commit)
       via  6f7998f9a209e9dd7b3ac80793098dfd81b489b9 (commit)
       via  feed2b3537a4e57e4cb55232242c6622d1fcc654 (commit)
       via  fb032e397153a63e4f1bd3b9b7fc1a89c01e7d6f (commit)
       via  d57f30ffe93b7f45aa6492ea1fba5d594adc01df (commit)
       via  690dafd743f765f04b21d3ce15ec0a63da6a53bd (commit)
       via  251a32a1fd1e7be23d59790e57a4b40fbcdceae3 (commit)
       via  6bc6c57d5761ccd2ef65291e81bbfd995b4758a9 (commit)
       via  0f1b7a45520517a40b7b85d57d461e20e81b7aa9 (commit)
       via  885a4ecf9c87b8e3a028b6488b0e6b853365edc8 (commit)
       via  8d5a5b95c85af1f15654fe164f306fe21065ea73 (commit)
       via  77367a5d67709b65afd8689159e5192416326cb7 (commit)
       via  935bd760ed4f39213f8db8eab730bf41dc217da9 (commit)
       via  52adf933c0bed4753a06632b25a46055d23eb655 (commit)
       via  4fe29ae03d1ff8f6d721b42f4bb356702110c4e0 (commit)
       via  e3fa282a59eea69c50dcb9354e568a8503510511 (commit)
       via  58df861a260fdf06b17194e224fb8c1bd03f0392 (commit)
       via  77e3f8cf3f3fe79c7dd5f92f30d70c47b515f4cd (commit)
       via  4a88c75d4d1decc3b3d5518bd12d592c118a7fd5 (commit)
       via  ea1b177b5503687f974252d185a9543066af20ed (commit)
       via  6535d4fbe623226171b27730f60161436d0433e4 (commit)
       via  0f4c693c3399bd9ecf2d2a5682fda8ed1eb8158f (commit)
       via  877e89713ad2398b6637b843a22c3b12607fe5bb (commit)
       via  33e08ca107c127d5c158882e7f2e86770a48c572 (commit)
       via  32fb3ad97a7ccc65ef391b84c8f488d4ea71e963 (commit)
       via  04e7fe3f480462d288c17bd121a368b74292cfd3 (commit)
       via  354fcf46bf93f1e2e317043f2998a8b17f22fe04 (commit)
       via  21acce853a4269f0db76dc2768bb7c5107b1b7d4 (commit)
       via  c021505a1a0d6ecb15a8fd1592b94baff6d115f4 (commit)
       via  02aa9813c1f6829bb9089400c5397f3faba7d9e0 (commit)
       via  3017593b63f34c4bc69494be8c80327eaad5d922 (commit)
       via  62bc6cce6fe7343c4ef06c7e690939fd0aa20148 (commit)
       via  77c17d3f03de64646da89de238288a22c49e3eb5 (commit)
       via  6f8383136ae83eb439c71a70c4bde83524b72c5e (commit)
       via  a16c7925f9a00f44680e2ca984def99d6bb3cecf (commit)
       via  12c37af78f65301858be28679695a9e818270947 (commit)
       via  c58fa9e4c5aa486bb270681a45a4f0f7e04b4139 (commit)
       via  89324744df3f73de1beaefb9420aeab5f9ff7824 (commit)
       via  f9070aee950581a47c0916cb1f3b48cd4bfcb7f4 (commit)
       via  ea15d26afc9ced4a11aea6733ea3df0969c5618b (commit)
       via  f685e5c06c382180eb1775bce714ea60154b08f2 (commit)
       via  5a19ee14367d9bb796c8e43c034ee9f327052c86 (commit)
       via  f92d30bb55decf9ed4d7cdf10231dfe2913ca11a (commit)
       via  461a9d0a1e896e0a1b676c6873f74404d5ab95c1 (commit)
       via  bc81810505f7263aedb8654d139510058c251626 (commit)
       via  b57c51e6ddfc6770d5c66eab5aeb1a5238e5a7ea (commit)
       via  ddb1b2241fc03a1d08dea42907ee8f859d3b2f46 (commit)
       via  0b838ba0d3c60203a52d1a918333846116e607cb (commit)
       via  f77021d7838e33e1662b42776ccc49be4435b1f2 (commit)
       via  632cd6151b871e060d09a79f6b8a283cc0ab469c (commit)
       via  7358d4af5775ee1bfa6099f63443d2ad27347f0d (commit)
       via  81a2df84a879ca5cbaaa61dffce5c413d920011d (commit)
       via  59b380d3682bb9fca26cae2c70c6c49934823f01 (commit)
       via  8b2247a6ae88fbf16bfd65852feb0216a4ea4dac (commit)
       via  1b01a9d09e5ecf21ff8bd9cce1c20372846a775c (commit)
       via  735f817c7f66813135b4ef576c117aa424a5bdad (commit)
       via  fef88019d325474471a353304499e7919023912e (commit)
       via  99522dd887762e71cbf4d895486f0e2f915eabda (commit)
       via  999736efa5e3aaf06949675c4f77e1ef9cd0d71b (commit)
       via  9c862cc45629b24d0a704926d339796926c692e5 (commit)
       via  85d5708e2c44e04b1a148610434de2c040d7142b (commit)
       via  e6b3d50483fb739da2ca83e493a1c30043ba0464 (commit)
       via  fc29e92af2bd2cfe8fa77dd311b9382680fd6324 (commit)
       via  78cffeb00933814658da0867ada0209403946b51 (commit)
       via  9129a474d3289157a4d8eb761383352dbfc2586e (commit)
       via  417893fc06dcd5339e2cd0278a6badbbe847d6c4 (commit)
       via  8e715d5202d79361622e89ef11a0d433558768f8 (commit)
       via  3f15151252dd734210582a2ae8923dada661231f (commit)
       via  79ec6c320ec5c24036856fd6b589ba8bf8b26ffc (commit)
       via  e53411af37a32d0c9b14515bd90c1e701c69f6e9 (commit)
       via  edea2125fa0791f920e3dd9e45c8aa0c9bfc6eb5 (commit)
       via  8f5fafa643f2d908b9e97b6d08aeb55c4b96addf (commit)
       via  01f9c1c0adfb37d11133c87056161f1edfba2672 (commit)
       via  ac7aaa887d827f8bdf1c2881d245cc655c6847b7 (commit)
       via  ebb6493b8ff763d42fe99438c8befe48c381b4aa (commit)
       via  c786a61641a965545c2e304b1c946afdedc6dc1a (commit)
       via  1efa5d9d7f699cc3ee636d4e1b50b3fb3a863180 (commit)
       via  e5251c4886f626e6ef9f6ba82771c0e949e0071f (commit)
       via  aaad42c52aed2c3890378511ecb2f97a3731d23a (commit)
       via  4beebf47805d0c3f80872e8f690f09c1658ae4e2 (commit)
       via  792c8b202cffc8fed726f10b3514523b1fc92469 (commit)
       via  8c624c6644563ed9c4fecec8b0b5f5dd115fe7ef (commit)
       via  d1c7f98e910bd19d21a649386f1a8066e4f41677 (commit)
       via  a90c8a06056300e0f9f5ffdae72b8a2ba26346fc (commit)
       via  30570ab2d917dc6adec02ba272ee50c17124b688 (commit)
       via  59908b70a929baf829202197d6e7ab5a3557da32 (commit)
       via  585d1c63d6d0126607f424571e38a4a60683cf4b (commit)
       via  d335ae50bb855b7b302dab852005385c0227dcfb (commit)
       via  8034dbfe87c45eaa2c0aef0e715b86fa79a7c4e3 (commit)
       via  0ddf0f5fa4d9d18599a1642b9f87caaa1f463c5e (commit)
       via  5a75094dfdd5f2307c4a1669e05db70355b08682 (commit)
       via  df5bad72ac8dac07a038f29823a1938bc9bbe72c (commit)
       via  24a865aeec3048620dea967cba9bb1df28cfd052 (commit)
       via  6756ff6dacd40b74676b4243bc12ea02a43f3ae9 (commit)
       via  6722a4d52b519ed768fa70b31cdd10da868cacbc (commit)
       via  ca9e1e2997420dea3e3b14fea010ab0af3d75f32 (commit)
       via  5fe9e2f204d67a9ed65ab8fc2a1feb09f6700b5b (commit)
       via  13eb9e8620e67d7c617423ac1992a720ecdfbf7d (commit)
       via  bf41f8dc2265ca0cd9ffb8b8c11047291e69ca3c (commit)
       via  926a65fa08617be677a93e9e388df0f229b01067 (commit)
       via  1c7930a7ba19706d388e4f8dcf2a55a886b74cd2 (commit)
       via  61b01087195d5d1f875f01c5fd2eac5dc61d012d (commit)
       via  84fcd68d77cc4aba23721e234622c33666e96c49 (commit)
       via  96f093b960839b26ce37d9ff470933eed9c2b135 (commit)
       via  e828a215cc73946fa3681fcd88c3ef76b68272bd (commit)
       via  0bea88f134736d6fd2872f77feaf309aca6c1bc6 (commit)
       via  406cb1fd4af84fcbdf8339cf1afdae2cfb3b7946 (commit)
       via  55689c559b3ac60765940d64a5b51007f94bddf7 (commit)
       via  925ac83b98b02abec3f7f2a70b7c83170f851e29 (commit)
       via  3f47015eab1abd9c7193a9e740f794c6a718c9f7 (commit)
       via  4064b389d13d2861083499517f51d89492156099 (commit)
       via  926985d03e3486f1a83615dc2794d310cb2cb520 (commit)
       via  189f58f73fe02cf2729ab26d6ce8ab6469e82a1c (commit)
       via  1ab0f2e8448a20674bfb8d12d463e5b3fec3ac6e (commit)
       via  dcb32f7928972c3ebe66f13a08560a1e19c62866 (commit)
       via  e25099da714a10dd3bc24be0002f9174fb9610c9 (commit)
       via  2d39d007d30f65589cfe4b671dc91cdab70ed107 (commit)
       via  e9798fc8931856a7eaeee37155600146d7dc7c57 (commit)
       via  59e01319e369a7c8e4f9a326d603dee7e3924c6b (commit)
       via  ad5a633a9e77e561675aca5263853db8161e82fa (commit)
       via  6c5a9b252b7bc062ed807aff342d0314811b5bde (commit)
       via  8320629b004d5fc8194afb5d277a0d9e01299121 (commit)
       via  0eb494bd7c49f0559c870d8a687ad0552f2feeb7 (commit)
       via  f79a424a36d3a5896c43c5cae5d88d690ecbe90e (commit)
       via  75bda54b2b5cdf06f334e72cd554b616a887d1cf (commit)
       via  2b97fc4f4f30bff13b94ad9b25766b4a6b2f6655 (commit)
       via  a1301a0545acc48bf2f94731cb26577806e3c383 (commit)
       via  8bb79638bc658d8e57b15ae1b16d28a08ec06a69 (commit)
       via  40cf6abada7f06648643b14b9b7db21d0fde3b27 (commit)
       via  81b49bb4d72fdfb5db8d7ad5f9b086c489acdb86 (commit)
       via  c4430b49b30dcd74226d272fa3da4812afc2c6f7 (commit)
       via  eeebde9d81c4bbc4e5388db5cd6148ca3589b91e (commit)
       via  4fae538655882db7c085dab798b4fb29c4a9d8f1 (commit)
       via  8c5e6268927737a472348d1ff8ecb2201c76b98a (commit)
       via  cda19a7cbc56ddd67c7d19ec7d072a64477d254b (commit)
       via  c65177c8ea0dfba3aaa84ea1bf2583b2d818d23d (commit)
       via  b5cfd5e541d4bbb7f13ad93392018711e19ba0e5 (commit)
       via  a2158e5b2c17043f0f3aa194009408aa73bd62ce (commit)
       via  85b06e8c212c9733cc77e71d8a72c72161dc34f2 (commit)
       via  4f87326ae6c17e26769b4ae276001b49d5bb3561 (commit)
       via  06c9c2a763326d4b30ff9448f726928538fba94c (commit)
       via  70af8c7c72300e1afe1974de22c117ff5566487d (commit)
       via  03e690228b6f5184d67a4ff3de56a861fcac9a23 (commit)
       via  d749aee2ec681e0304dd53c63f276af98edeaf31 (commit)
       via  f9a545ca4c82e51fc7c47793fec34eb5deb19e46 (commit)
       via  a4f1f8de765810aecff1194c74a108682e3de28e (commit)
       via  287edb431de6ae5d7106dd4e593a193908b9ba9f (commit)
       via  99d7c21284686ba3d021a6d09938b82ea56de783 (commit)
       via  309b24ff461b623770e950d6ff12654241bdd39b (commit)
       via  52d165984d1a7784a1a6e0a3b845b19559698203 (commit)
       via  c4c85ce1694bd421912d1902f2d614c15bebbea1 (commit)
       via  f78cf6ebc22712c470da4af720915b09ae8e8ebe (commit)
       via  4c250f85ed6ad7f697c42137f1e67aadacf73dac (commit)
       via  941eceae0a54d023dce0c43757b0104b8adbcc9c (commit)
       via  79143dc457f23670d860a2fa134b13eb62db490b (commit)
       via  80dd433545aecf82aa178365dbc6e0650e12907b (commit)
       via  d867ca0fabdb5398d6a964aa393fadf678af2bbf (commit)
       via  3eb58c78cacf7686435e963d423c6c035a737bc0 (commit)
       via  e90d2063e0bd98767fdcd38962ad5be6f2eda68e (commit)
       via  0699e756aead6ec1b3a80f5e044d8c3cb35e3280 (commit)
       via  384501f85cc9e66a686a96e349241442af29a56b (commit)
       via  707e700d4861b2c47235183ce6e98d985819dd2d (commit)
       via  67a88d3fd748cc42730e142cbfa79d0b7fb7a813 (commit)
       via  c1ebc31d07e2c04c0158fbd3e7289db650b41c1d (commit)
       via  ad3f4a5e40390f14762648986dae8430760202c2 (commit)
       via  a9d549be7404552a13a95db041e7e1da64729341 (commit)
       via  3eb8c8e08c993b1458a6d79f434e0305936bcd14 (commit)
       via  6fe98e3c2a669c9dc779980426a81fbe1ddcfff3 (commit)
       via  edfe1b966d53caf3ed9e17cd525b0d94beff0aaf (commit)
       via  bfd50c768ccf03b2e4f3d3ecbeb5fb344ff79129 (commit)
       via  ae21ebb0f609f8a2aa8ffc3d4b84c465111ec2c3 (commit)
       via  7cf66b7e44e389205ae4344764fbf136550854ce (commit)
       via  fd9334b7d856c4f748919d035b2a4ad3c85b545b (commit)
       via  0c3b69c6e170bee7dd775090af2bdd1cae900080 (commit)
       via  0fdc040591f07f5f876ff2a16ea363e9026346ae (commit)
       via  f5edd310465966137f0cd4e2109d90f7e5d5965f (commit)
       via  73ac6b09eeeebcdb03965076d4aa8a8a7a361ebe (commit)
       via  86a307f08882d02ad443e848e096a30ca14ec918 (commit)
      from  9395f12c95a2519803a0dc15b56424c22df88c84 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                         |   30 +
 doc/guide/Makefile.am                             |   18 +-
 doc/guide/bind10-guide.html                       |   46 +-
 doc/guide/bind10-guide.xml                        |  121 ++-
 doc/guide/bind10-messages.html                    |  841 +++++++++
 doc/guide/bind10-messages.xml                     | 2018 +++++++++++++++++++++
 src/bin/auth/Makefile.am                          |   14 +-
 src/bin/auth/auth_log.cc                          |   26 +
 src/bin/auth/auth_log.h                           |   54 +
 src/bin/auth/auth_messages.mes                    |  260 +++
 src/bin/auth/auth_srv.cc                          |  173 +--
 src/bin/auth/auth_srv.h                           |   21 -
 src/bin/auth/benchmarks/Makefile.am               |    3 +
 src/bin/auth/command.cc                           |   37 +-
 src/bin/auth/main.cc                              |   55 +-
 src/bin/auth/statistics.cc                        |   35 +-
 src/bin/auth/statistics.h                         |    7 +-
 src/bin/auth/tests/Makefile.am                    |    4 +
 src/bin/auth/tests/auth_srv_unittest.cc           |    9 -
 src/bin/auth/tests/statistics_unittest.cc         |    3 +-
 src/bin/bind10/run_bind10.sh.in                   |    2 +-
 src/bin/bind10/tests/Makefile.am                  |    8 +
 src/bin/bindctl/run_bindctl.sh.in                 |    8 +
 src/bin/bindctl/tests/Makefile.am                 |    8 +
 src/bin/cfgmgr/plugins/b10logging.py              |   19 +-
 src/bin/cfgmgr/plugins/tests/Makefile.am          |    4 +-
 src/bin/cfgmgr/plugins/tests/logging_test.py      |  135 ++
 src/bin/cfgmgr/tests/Makefile.am                  |    8 +
 src/bin/cmdctl/tests/Makefile.am                  |    8 +
 src/bin/loadzone/run_loadzone.sh.in               |    8 +
 src/bin/loadzone/tests/correct/Makefile.am        |    9 +-
 src/bin/loadzone/tests/error/Makefile.am          |    9 +-
 src/bin/msgq/tests/Makefile.am                    |    8 +
 src/bin/resolver/Makefile.am                      |   12 +-
 src/bin/resolver/main.cc                          |    8 +-
 src/bin/resolver/resolver.cc                      |   63 +-
 src/bin/resolver/resolver_log.h                   |    2 +-
 src/bin/resolver/resolver_messages.mes            |  198 ++
 src/bin/resolver/resolverdef.mes                  |  193 --
 src/bin/resolver/tests/Makefile.am                |    2 +-
 src/bin/tests/Makefile.am                         |    8 +
 src/bin/xfrin/Makefile.am                         |   11 +-
 src/bin/xfrin/tests/Makefile.am                   |    4 +-
 src/bin/xfrin/xfrin.py.in                         |   60 +-
 src/bin/xfrin/xfrin_messages.mes                  |   91 +
 src/bin/xfrout/Makefile.am                        |   10 +-
 src/bin/xfrout/tests/Makefile.am                  |    4 +-
 src/bin/xfrout/xfrout.py.in                       |   76 +-
 src/bin/xfrout/xfrout_messages.mes                |  140 ++
 src/bin/zonemgr/tests/Makefile.am                 |    8 +
 src/lib/acl/Makefile.am                           |   30 +-
 src/lib/acl/dns.cc                                |   34 +
 src/lib/acl/dns.h                                 |   89 +
 src/lib/acl/ip_check.cc                           |  111 ++
 src/lib/acl/ip_check.h                            |  354 ++++
 src/lib/acl/loader.cc                             |   46 +
 src/lib/acl/loader.h                              |  448 +++++
 src/lib/acl/logic_check.h                         |  206 +++
 src/lib/acl/tests/Makefile.am                     |   15 +-
 src/lib/acl/tests/acl_test.cc                     |   68 +-
 src/lib/acl/tests/creators.h                      |  154 ++
 src/lib/acl/tests/dns_test.cc                     |   35 +
 src/lib/acl/tests/ip_check_unittest.cc            |  588 ++++++
 src/lib/acl/tests/loader_test.cc                  |  371 ++++
 src/lib/acl/tests/logcheck.h                      |   91 +
 src/lib/acl/tests/logic_check_test.cc             |  228 +++
 src/lib/acl/tests/run_unittests.cc                |    3 +-
 src/lib/asiodns/Makefile.am                       |   12 +-
 src/lib/asiodns/asiodef.mes                       |   56 -
 src/lib/asiodns/asiodns_messages.mes              |   56 +
 src/lib/asiodns/io_fetch.cc                       |   28 +-
 src/lib/cc/data.h                                 |    2 +-
 src/lib/config/Makefile.am                        |   12 +-
 src/lib/config/ccsession.cc                       |   66 +-
 src/lib/config/ccsession.h                        |   56 +-
 src/lib/config/config_log.h                       |    2 +-
 src/lib/config/config_messages.mes                |   59 +
 src/lib/config/configdef.mes                      |   57 -
 src/lib/config/tests/ccsession_unittests.cc       |   62 +
 src/lib/datasrc/Makefile.am                       |   12 +-
 src/lib/datasrc/cache.cc                          |   17 +-
 src/lib/datasrc/datasrc_messages.mes              |  493 +++++
 src/lib/datasrc/logger.h                          |    2 +-
 src/lib/datasrc/messagedef.mes                    |  494 -----
 src/lib/exceptions/exceptions.h                   |   11 +
 src/lib/log/logger.cc                             |   10 +-
 src/lib/log/logger.h                              |   29 +-
 src/lib/log/logger_impl.cc                        |    2 +-
 src/lib/log/logger_manager.cc                     |    4 +-
 src/lib/log/logger_support.cc                     |   47 +-
 src/lib/log/logger_support.h                      |   24 +-
 src/lib/log/tests/Makefile.am                     |    1 +
 src/lib/log/tests/destination_test.sh.in          |   24 +-
 src/lib/log/tests/local_file_test.sh.in           |   39 +-
 src/lib/log/tests/logger_support_unittest.cc      |   72 +
 src/lib/log/tests/severity_test.sh.in             |   52 +-
 src/lib/python/isc/cc/tests/Makefile.am           |    8 +
 src/lib/python/isc/config/ccsession.py            |   32 +-
 src/lib/python/isc/config/tests/Makefile.am       |    9 +
 src/lib/python/isc/config/tests/ccsession_test.py |   39 +-
 src/lib/python/isc/datasrc/tests/Makefile.am      |    8 +
 src/lib/python/isc/log/Makefile.am                |    3 +
 src/lib/python/isc/log/__init__.py                |   10 +-
 src/lib/python/isc/log/log.cc                     |   63 +-
 src/lib/python/isc/log/tests/Makefile.am          |    3 +-
 src/lib/python/isc/log/tests/console.out          |    8 +-
 src/lib/python/isc/log/tests/log_test.py          |   30 +
 src/lib/python/isc/net/tests/Makefile.am          |    8 +
 src/lib/python/isc/notify/tests/Makefile.am       |    2 +-
 src/lib/python/isc/util/tests/Makefile.am         |    8 +
 tools/system_messages.py                          |  413 +++++
 111 files changed, 8821 insertions(+), 1394 deletions(-)
 create mode 100644 doc/guide/bind10-messages.html
 create mode 100644 doc/guide/bind10-messages.xml
 create mode 100644 src/bin/auth/auth_log.cc
 create mode 100644 src/bin/auth/auth_log.h
 create mode 100644 src/bin/auth/auth_messages.mes
 mode change 100644 => 100755 src/bin/bindctl/run_bindctl.sh.in
 create mode 100644 src/bin/cfgmgr/plugins/tests/logging_test.py
 mode change 100644 => 100755 src/bin/loadzone/run_loadzone.sh.in
 create mode 100644 src/bin/resolver/resolver_messages.mes
 delete mode 100644 src/bin/resolver/resolverdef.mes
 create mode 100644 src/bin/xfrin/xfrin_messages.mes
 create mode 100644 src/bin/xfrout/xfrout_messages.mes
 create mode 100644 src/lib/acl/dns.cc
 create mode 100644 src/lib/acl/dns.h
 create mode 100644 src/lib/acl/ip_check.cc
 create mode 100644 src/lib/acl/ip_check.h
 create mode 100644 src/lib/acl/loader.cc
 create mode 100644 src/lib/acl/loader.h
 create mode 100644 src/lib/acl/logic_check.h
 create mode 100644 src/lib/acl/tests/creators.h
 create mode 100644 src/lib/acl/tests/dns_test.cc
 create mode 100644 src/lib/acl/tests/ip_check_unittest.cc
 create mode 100644 src/lib/acl/tests/loader_test.cc
 create mode 100644 src/lib/acl/tests/logcheck.h
 create mode 100644 src/lib/acl/tests/logic_check_test.cc
 delete mode 100644 src/lib/asiodns/asiodef.mes
 create mode 100644 src/lib/asiodns/asiodns_messages.mes
 create mode 100644 src/lib/config/config_messages.mes
 delete mode 100644 src/lib/config/configdef.mes
 create mode 100644 src/lib/datasrc/datasrc_messages.mes
 delete mode 100644 src/lib/datasrc/messagedef.mes
 create mode 100644 src/lib/log/tests/logger_support_unittest.cc
 create mode 100644 tools/system_messages.py

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 6c6da37..c1ff9d0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,33 @@
+263.	[func]      jelte
+	Logging configuration can now also accept a * as a first-level
+	name (e.g. '*', or '*.cache'), indicating that every module
+	should use that configuration, unless overridden by an explicit
+	logging configuration for that module
+	(Trac 1004, git 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef)
+
+262.	[func]      stephen
+	Add some initial documentation about the logging framework.
+	Provide BIND 10 Messages Manual in HTML and DocBook XML formats.
+	This provides all the log message descriptions in a single document.
+	A developer tool, tools/system_messages.py (available in git repo),
+	was written to generate this.
+	(Trac 1012, git 502100d7b9cd9d2300e78826a3bddd024ef38a74)
+
+261.	[func]      stephen
+	Add new-style logging messages to b10-auth.
+	(Trac 738, git c021505a1a0d6ecb15a8fd1592b94baff6d115f4)
+
+260.	[func]      stephen
+	Remove comma between message identification and the message
+	text in the new-style logging messages.
+	(Trac 1031, git 1c7930a7ba19706d388e4f8dcf2a55a886b74cd2)
+
+259.	[bug]       stephen
+	Logging now correctly initialized in b10-auth.  Also, fixed
+	bug whereby querying for "version.bind txt ch" would cause
+	b10-auth to crash if BIND 10 was started with the "-v" switch.
+	(Trac 1022,1023, git 926a65fa08617be677a93e9e388df0f229b01067)
+
 258.	[build]		jelte
 	Now builds and runs with Python 3.2
 	(Trac #710, git dae1d2e24f993e1eef9ab429326652f40a006dfb)
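
ChangeLog entry 263 above describes the new wildcard logging configuration: a logger entry named '*' (or '*.something') applies to every module unless an explicit entry for that module overrides it. Purely as an illustration of that precedence rule -- the configuration layout and matching details below are assumptions made for this sketch, not code taken from the b10logging.py plugin -- the selection could look like this in Python:

# Sketch only, assuming a list of logger configurations, each a dict
# with a 'name' key, following the description in ChangeLog entry 263.
def select_logger_config(module_name, logger_configs):
    """Return the logger configuration that applies to 'module_name'."""
    explicit = None
    wildcard = None
    for config in logger_configs:
        name = config['name']
        if name == module_name:
            explicit = config           # an exact match always wins
        elif name.startswith('*'):
            # '*' matches any module; '*.cache' matches e.g. 'Xfrin.cache'
            if module_name.endswith(name[1:]):
                wildcard = config
    return explicit if explicit is not None else wildcard

# Hypothetical example: '*' covers Resolver, while Auth has its own entry.
configs = [{'name': '*', 'severity': 'INFO'},
           {'name': 'Auth', 'severity': 'DEBUG'}]
print(select_logger_config('Resolver', configs))  # picks the '*' entry
print(select_logger_config('Auth', configs))      # picks the 'Auth' entry
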
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index c790139..c84ad06 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -1,10 +1,12 @@
 EXTRA_DIST = bind10-guide.css
-EXTRA_DIST += bind10-guide.html
-EXTRA_DIST += bind10-guide.xml
+EXTRA_DIST += bind10-guide.xml bind10-guide.html
+EXTRA_DIST += bind10-messages.xml bind10-messages.html
 
 # This is not a "man" manual, but reuse this for now for docbook.
 if ENABLE_MAN
 
+.PHONY: bind10-messages.xml
+
 bind10-guide.html: bind10-guide.xml
 	xsltproc --novalid --xinclude --nonet \
 		--path $(top_builddir)/doc \
@@ -13,4 +15,16 @@ bind10-guide.html: bind10-guide.xml
 		http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
 		$(srcdir)/bind10-guide.xml
 
+bind10-messages.html: bind10-messages.xml
+	xsltproc --novalid --xinclude --nonet \
+		--path $(top_builddir)/doc \
+		-o $@ \
+		--stringparam html.stylesheet $(srcdir)/bind10-guide.css \
+		http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
+		$(srcdir)/bind10-messages.xml
+
+# So many dependencies that it's easiest just to regenerate it every time
+bind10-messages.xml:
+	$(PYTHON) $(top_srcdir)/tools/system_messages.py -o $@ $(top_srcdir)
+
 endif
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 069f508..5754cf0 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -6,7 +6,7 @@
       </p><p>
         This is the reference guide for BIND 10 version 20110519.
 	The most up-to-date version of this document, along with
-	other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.  </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
 stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
 ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
 /a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+	other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.  </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
 stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
 ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
 /a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
       BIND is the popular implementation of a DNS server, developer
       interfaces, and DNS tools.
       BIND 10 is a rewrite of BIND 9.  BIND 10 is written in C++ and Python
@@ -684,4 +684,48 @@ This may be a temporary setting until then.
     "stats.timestamp": 1295543046.823504
 }
        </pre><p>
+    </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
+        Each message written by BIND 10 to the configured logging destinations
+        comprises a number of components that identify the origin of the
+        message and, if the message indicates a problem, information about the
+        problem that may be useful in fixing it.
+    </p><p>
+        Consider the message below logged to a file:
+        </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+    ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
+    </p><p>
+      Note: the layout of messages written to the system logging
+      file (syslog) may be slightly different.  This message has
+      been split across two lines here for display reasons; in the
+      logging file, it will appear on one line.
+    </p><p>
+      The log message comprises a number of components:
+
+        </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+            The date and time at which the message was generated.
+        </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+            The severity of the message.
+        </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+	    The source of the message.  This comprises two components:
+	    the BIND 10 process generating the message (in this
+	    case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+	    within the program from which the message originated
+	    (which in the example is the asynchronous I/O link
+	    module, asiolink).
+        </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+	    The message identification.  Every message in BIND 10
+	    has a unique identification, which can be used as an
+	    index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
+	    Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
+        </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+            A brief description of the cause of the problem.  Within this text,
+            information relating to the condition that caused the message to
+            be logged will be included.  In this example, error number 111
+            (an operating system-specific error number) was encountered when
+            trying to open a TCP connection to port 53 on the local system
+            (address 127.0.0.1).  The next step would be to find out the reason
+            for the failure by consulting your system's documentation to
+            identify what error number 111 means.
+        </p></dd></dl></div><p>
+
     </p></div></div></body></html>
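
The Logging chapter added above documents the fixed layout of a BIND 10 log line: timestamp, severity, [process.module], message identification, and message text. As a small illustration of that layout only -- the regular expression below is inferred from the example line in the chapter, not taken from the formatter the logging library actually uses -- the components can be pulled apart like this:

import re

# Sketch: split a BIND 10 log line into the components described in the
# Logging chapter above. The pattern mirrors the documented example, not
# the actual log4cplus layout configuration.
LOG_LINE = re.compile(
    r'(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) '
    r'(?P<severity>[A-Z]+) '
    r'\[(?P<process>[^.\]]+)\.(?P<module>[^\]]+)\] '
    r'(?P<msgid>[A-Z0-9_]+) '
    r'(?P<text>.*)')

def parse_log_line(line):
    """Return a dict of log message components, or None on no match."""
    match = LOG_LINE.match(line.strip())
    return match.groupdict() if match else None

example = ('2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink] '
           'ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)')
print(parse_log_line(example))
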
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 5eb4dc7..7d1a006 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -13,7 +13,7 @@
     <subtitle>Administrator Reference for BIND 10</subtitle>
 
     <copyright>
-      <year>2010</year><holder>Internet Systems Consortium, Inc.</holder>
+      <year>2010-2011</year><holder>Internet Systems Consortium, Inc.</holder>
     </copyright>
 
     <abstract>
@@ -81,15 +81,13 @@
 
       <para>
 	BIND 10 uses the Botan crypto library for C++. It requires
-	at least Botan version 1.8. To build BIND 10, install the
-	Botan libraries and development include headers.
+	at least Botan version 1.8.
       </para>
 
-<!--
-TODO
-Debian and Ubuntu:
- libgmp3-dev and libbz2-dev required for botan too
--->
+      <para>
+	BIND 10 uses the log4cplus C++ logging library. It requires
+	at least log4cplus version 1.0.3.
+      </para>
 
       <para>
 	The authoritative server requires SQLite 3.3.9 or newer.
@@ -303,6 +301,12 @@ var/
 
     <section>
       <title>Building Requirements</title>
+
+        <para>
+          In addition to the run-time requirements, building BIND 10
+          from source code requires various development include headers.
+        </para>
+
         <note>
           <simpara>
             Some operating systems have split their distribution packages into
@@ -320,6 +324,19 @@ var/
         </para>
 
         <para>
+	  To build BIND 10, also install the Botan (at least version
+	  1.8) and the log4cplus (at least version 1.0.3)
+          development include headers.
+        </para>
+
+<!--
+TODO
+Debian and Ubuntu:
+ libgmp3-dev and libbz2-dev required for botan too
+-->
+
+        <para>
+<!-- TODO: is this needed at build time? test time? -->
 	  The Python Library and Python _sqlite3 module are required to
           enable the Xfrout and Xfrin support.
         </para>
@@ -333,7 +350,7 @@ var/
           Building BIND 10 also requires a C++ compiler and
           standard development headers, make, and pkg-config.
           BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
-          4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+          4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
         </para>
     </section>
 
@@ -1433,6 +1450,92 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
 
   </chapter>
 
+  <chapter id="logging">
+    <title>Logging</title>
+
+<!-- TODO: how to configure logging, logging destinations etc. -->
+
+    <para>
+        Each message written by BIND 10 to the configured logging destinations
+        comprises a number of components that identify the origin of the
+        message and, if the message indicates a problem, information about the
+        problem that may be useful in fixing it.
+    </para>
+
+    <para>
+        Consider the message below logged to a file:
+        <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+    ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+    </para>
+
+    <para>
+      Note: the layout of messages written to the system logging
+      file (syslog) may be slightly different.  This message has
+      been split across two lines here for display reasons; in the
+      logging file, it will appear on one line.
+    </para>
+
+    <para>
+      The log message comprises a number of components:
+
+        <variablelist>
+        <varlistentry>
+        <term>2011-06-15 13:48:22.034</term>
+        <listitem><para>
+            The date and time at which the message was generated.
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>ERROR</term>
+        <listitem><para>
+            The severity of the message.
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>[b10-resolver.asiolink]</term>
+        <listitem><para>
+	    The source of the message.  This comprises two components:
+	    the BIND 10 process generating the message (in this
+	    case, <command>b10-resolver</command>) and the module
+	    within the program from which the message originated
+	    (which in the example is the asynchronous I/O link
+	    module, asiolink).
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>ASIODNS_OPENSOCK</term>
+        <listitem><para>
+	    The message identification.  Every message in BIND 10
+	    has a unique identification, which can be used as an
+	    index into the <ulink
+	    url="bind10-messages.html"><citetitle>BIND 10 Messages
+	    Manual</citetitle></ulink> (<ulink
+	    url="http://bind10.isc.org/docs/bind10-messages.html"
+	    />) from which more information can be obtained.
+        </para></listitem>
+        </varlistentry>
+
+        <varlistentry>
+        <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+        <listitem><para>
+            A brief description of the cause of the problem.  Within this text,
+            information relating to the condition that caused the message to
+            be logged will be included.  In this example, error number 111
+            (an operating system-specific error number) was encountered when
+            trying to open a TCP connection to port 53 on the local system
+            (address 127.0.0.1).  The next step would be to find out the reason
+            for the failure by consulting your system's documentation to
+            identify what error number 111 means.
+        </para></listitem>
+        </varlistentry>
+        </variablelist>
+
+    </para>
+  </chapter>
+
 <!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
 
   <!-- <index>    <title>Index</title> </index> -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
new file mode 100644
index 0000000..b075e96
--- /dev/null
+++ b/doc/guide/bind10-messages.html
@@ -0,0 +1,841 @@
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+        20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+	  Internet Systems Consortium (ISC). It includes DNS libraries
+	  and modular components for controlling authoritative and
+	  recursive DNS servers.
+      </p><p>
+        This is the messages manual for BIND 10 version 20110519.
+	    The most up-to-date version of this document, along with
+	    other documents for BIND 10, can be found at
+        <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+      </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dt><span class="chapter"><a href="#messages">2. BIND 10 Messages</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><p>
+      This document lists each message that can be logged by the
+      programs in the BIND 10 package.  Each entry in this manual
+      is of the form:
+      </p><pre class="screen">IDENTIFICATION message-text</pre><p>
+      ... where "IDENTIFICATION" is the message identification included
+      in each message logged and "message-text" is the accompanying
+      message text.  The "message-text" may include placeholders of the
+      form "%1", "%2" etc.; these parameters are replaced by relevant
+      values when the message is logged.
+    </p><p>
+      Each entry is also accompanied by a description giving more
+      information about the circumstances that result in the message
+      being logged.
+    </p><p>
+      For information on configuring and using BIND 10 logging,
+      refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
+    </p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
+      </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+This message should not appear and indicates an internal error if it does.
+Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+The termination method of the resolver's upstream fetch class was called with
+an unknown result code (which is given in the message).  This message should
+not appear and may indicate an internal error.  Please enter a bug report.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the
+command and control channel. An unexpected exception was thrown. This
+most likely points to an internal inconsistency in the module code. The
+exception message is appended to the log error, and the module will
+continue to run, but will not send back an answer.
+</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was a parse error in the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
+The module specification file for this module was rejected by the
+configuration manager. The full error message answer from the
+configuration manager is appended to the log error. The most likely
+cause is that the module is of a different (specification file) version
+than the running configuration manager.
+</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file. Please
+verify that the filename is correct and that its contents are a valid
+BIND10 module specification.
+</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
+Debug information that the hotspot cache was created at startup.
+</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
+Debug information. The hotspot cache is being destroyed.
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
+The hotspot cache is enabled from now on.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
+Debug information. There was an attempt to look up an item in the hotspot
+cache. And the item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
+Debug information. An item was successfully looked up in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
+Debug information. An attempt was made to look up an item in the hotspot cache,
+but it is not there.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+Debug information. An item is being removed from the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. The size of 0
+means no limit.
+</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
+Debug information. We're processing some internal query for given name and
+type.
+</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
+Debug information. An RRset is being added to the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
+Debug information. Some special marks above each * in a wildcard name are needed.
+They are being added now for this name.
+</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
+Debug information. A zone is being added into the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</p></dd><dt><a name="DATASRC_MEM_CNAME"></a><span class="term">DATASRC_MEM_CNAME CNAME at the domain '%1'</span></dt><dd><p>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to a CNAME.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_CREATE"></a><span class="term">DATASRC_MEM_CREATE creating zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</p></dd><dt><a name="DATASRC_MEM_DELEG_FOUND"></a><span class="term">DATASRC_MEM_DELEG_FOUND delegation found at '%1'</span></dt><dd><p>
+Debug information. A delegation point was found above the requested record.
+</p></dd><dt><a name="DATASRC_MEM_DESTROY"></a><span class="term">DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A zone from in-memory data source is being destroyed.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_ENCOUNTERED"></a><span class="term">DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</span></dt><dd><p>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain and
+stop the search.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
+Debug information. A DNAME was found instead of the requested information.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made to put DNAME and NS records into the same domain
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</p></dd><dt><a name="DATASRC_MEM_DUP_RRSET"></a><span class="term">DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</span></dt><dd><p>
+An RRset is being inserted into the in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</p></dd><dt><a name="DATASRC_MEM_EXACT_DELEGATION"></a><span class="term">DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</span></dt><dd><p>
+Debug information. There's an NS record at the requested domain. This means
+this zone is not authoritative for the requested domain and a delegation
+should be followed instead. The requested domain is the apex of some other
+zone.
+</p></dd><dt><a name="DATASRC_MEM_FIND"></a><span class="term">DATASRC_MEM_FIND find '%1/%2'</span></dt><dd><p>
+Debug information. A search for the requested RRset is being started.
+</p></dd><dt><a name="DATASRC_MEM_FIND_ZONE"></a><span class="term">DATASRC_MEM_FIND_ZONE looking for zone '%1'</span></dt><dd><p>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
+Debug information. The content of a master file is being loaded into memory.
+</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+Debug information. The requested domain does not exist.
+</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
+Debug information. While searching for the requested domain, an NS record (a
+delegation) was encountered on the way. This may cause the search to stop.
+</p></dd><dt><a name="DATASRC_MEM_NXRRSET"></a><span class="term">DATASRC_MEM_NXRRSET no such type '%1' at '%2'</span></dt><dd><p>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</p></dd><dt><a name="DATASRC_MEM_OUT_OF_ZONE"></a><span class="term">DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</span></dt><dd><p>
+An attempt was made to add the domain into a zone that shouldn't hold it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+</p></dd><dt><a name="DATASRC_MEM_RENAME"></a><span class="term">DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</span></dt><dd><p>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</p></dd><dt><a name="DATASRC_MEM_SINGLETON"></a><span class="term">DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</span></dt><dd><p>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_SUCCESS"></a><span class="term">DATASRC_MEM_SUCCESS query for '%1/%2' successful</span></dt><dd><p>
+Debug information. The requested record was found.
+</p></dd><dt><a name="DATASRC_MEM_SUPER_STOP"></a><span class="term">DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</span></dt><dd><p>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (i.e. the domain exists, but it doesn't have the requested record
+type).
+</p></dd><dt><a name="DATASRC_MEM_SWAP"></a><span class="term">DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</span></dt><dd><p>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for doing such manipulation in an exception-safe
+manner: the new data are prepared in a different zone object and, once that
+succeeds, the contents are swapped. The original object then holds the new
+data, while the other object holds the old data and can be safely destroyed.
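+A minimal C++ sketch of this prepare-then-swap idiom is shown below; it is
+illustrative only and not the actual in-memory data source code:
+</p><pre class="screen">
+#include <string>
+#include <utility>
+#include <vector>
+
+struct ZoneData {
+    std::vector<std::string> rrsets;   // stand-in for the real zone contents
+};
+
+void reloadZone(ZoneData& live, const std::vector<std::string>& new_rrsets) {
+    ZoneData staging;                        // prepare new contents separately
+    staging.rrsets = new_rrsets;             // may throw; 'live' is untouched
+    std::swap(live.rrsets, staging.rrsets);  // non-throwing exchange
+}   // 'staging', which now holds the old data, is destroyed here
+</pre><p>
+If preparing the new data throws, the exception propagates before the swap and
+the live zone is left unchanged.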
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_CANCEL"></a><span class="term">DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</span></dt><dd><p>
+Debug information. A domain above the wildcard was reached, but there's
+something below the requested domain. Therefore the wildcard doesn't apply
+here. This behaviour is specified by RFC 1034, section 4.3.3.
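+As an illustration (the names are made up), consider a zone containing:
+</p><pre class="screen">
+*.example.org.    3600 IN TXT "wildcard"
+b.a.example.org.  3600 IN A   192.0.2.1
+</pre><p>
+A query for 'a.example.org/TXT' is not answered from the wildcard, because the
+name 'a.example.org' already exists in the tree (as an empty nonterminal
+created by 'b.a.example.org'); the result is an NXRRSET (the name exists but
+has no data of that type) rather than an answer synthesized from the wildcard.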
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses it as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses it as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
+Debug information. A data source is being removed from the meta data source.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC"></a><span class="term">DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</span></dt><dd><p>
+Debug information. An NSEC record covering this zone is being added.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC3"></a><span class="term">DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</span></dt><dd><p>
+Debug information. An NSEC3 record for the given zone is being added to the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_RRSET"></a><span class="term">DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</span></dt><dd><p>
+Debug information. An RRset is being added to the response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_SOA"></a><span class="term">DATASRC_QUERY_ADD_SOA adding SOA of '%1'</span></dt><dd><p>
+Debug information. An SOA record of the given zone is being added to the
+authority section of the response message.
+</p></dd><dt><a name="DATASRC_QUERY_AUTH_FAIL"></a><span class="term">DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the authoritative query. An error
+code of 1 means a general error, 2 means not implemented. The data source
+should have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+Debug information. While processing a query, a lookup into the hotspot cache
+is being made.
+</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
+Debug information. The whole referral information is being copied into the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
+There was a CNAME and it was being followed. But it contains no records, so
+there's nowhere to go. There will be no answer. This indicates a problem with
+the supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
+During an attempt to synthesize a CNAME from this DNAME it was discovered that
+the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up and put
+into the additional section.
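+For example (the data is illustrative), when answering an MX query for
+'example.org.' from a zone containing:
+</p><pre class="screen">
+example.org.      3600 IN MX 10 mail.example.org.
+mail.example.org. 3600 IN A  192.0.2.10
+</pre><p>
+the A record for 'mail.example.org.' is looked up and placed into the
+additional section of the response.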
+</p></dd><dt><a name="DATASRC_QUERY_GET_NS_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</span></dt><dd><p>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up and put
+into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GLUE_FAIL"></a><span class="term">DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the glue query. An error code of 1
+means a general error, 2 means not implemented. The data source should have
+logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_INVALID_OP"></a><span class="term">DATASRC_QUERY_INVALID_OP invalid query operation requested</span></dt><dd><p>
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is an auth query.
+</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for referral information.
+</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a simple query.
+</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_NS"></a><span class="term">DATASRC_QUERY_MISSING_NS missing NS records for '%1'</span></dt><dd><p>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_SOA"></a><span class="term">DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</span></dt><dd><p>
+The answer should have been a negative one (e.g. indicating the nonexistence
+of something). To do so, an SOA record should be put into the authority
+section, but the zone does not have one. This indicates a problem with the
+provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NOGLUE_FAIL"></a><span class="term">DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the no-glue query. An error code
+of 1 means a general error, 2 means not implemented. The data source should
+have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC record into the message failed, because the zone
+does not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC3"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC3 record into the message failed, because the zone
+does not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_ZONE"></a><span class="term">DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</span></dt><dd><p>
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
+Debug information. A query is now being processed.
+</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+The user wants DNSSEC and we discovered that the entity (either the domain or
+the record) doesn't exist. But there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+</p></dd><dt><a name="DATASRC_QUERY_REF_FAIL"></a><span class="term">DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the query for referral
+information. An error code of 1 means a general error, 2 means not
+implemented. The data source should have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_RRSIG"></a><span class="term">DATASRC_QUERY_RRSIG unable to answer RRSIG query</span></dt><dd><p>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</p></dd><dt><a name="DATASRC_QUERY_SIMPLE_FAIL"></a><span class="term">DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the simple query. An error code of
+1 means a general error, 2 means not implemented. The data source should have
+logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
+Debug information. While answering a query, a DNAME was encountered. The DNAME
+itself will be returned, along with a synthesized CNAME for clients which don't
+understand DNAMEs.
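+For example (the names are made up), given the record:
+</p><pre class="screen">
+example.com. 3600 IN DNAME example.net.
+</pre><p>
+a query for 'www.example.com' is answered with the DNAME itself plus a
+synthesized CNAME equivalent to 'www.example.com. CNAME www.example.net.'.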
+</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
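+An extreme (made-up) example of data that triggers this is a CNAME loop:
+</p><pre class="screen">
+a.example.org. 3600 IN CNAME b.example.org.
+b.example.org. 3600 IN CNAME a.example.org.
+</pre><p>
+Following the chain starting at 'a.example.org' never terminates, so the limit
+of 16 CNAMEs is reached and the lookup is abandoned.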
+</p></dd><dt><a name="DATASRC_QUERY_UNKNOWN_RESULT"></a><span class="term">DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</span></dt><dd><p>
+This indicates a programmer error. The answer from a subtask doesn't look like
+anything known.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD"></a><span class="term">DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</span></dt><dd><p>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
+An error occurred during an attempt to cover the domain by a wildcard. The
+exact error should have already been reported.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, a referral was encountered, but it wasn't possible
+to get enough information for it.  The code is 1 for error, 2 for not
+implemented.
+</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
+Debug information. The SQLite data source is closing the database file.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being created.
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS"></a><span class="term">DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</span></dt><dd><p>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT"></a><span class="term">DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREC"></a><span class="term">DATASRC_SQLITE_FINDREC looking for record '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up records of the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF"></a><span class="term">DATASRC_SQLITE_FINDREF looking for referral at '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was trying to identify whether there's a referral, but
+it contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</span></dt><dd><p>
+Debug information. We're trying to look up an NSEC3 record in the SQLite data
+source.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is loading an SQLite database from
+the provided file.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
+Debug information. We're trying to look up the name preceding the supplied one.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
+The SQLite data source tried to identify the name preceding this one, but this
+name is not contained in any zone in the data source.
+</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
+The database for the SQLite data source was found empty. It is assumed this is
+the first run and it is being initialized with the current schema.  It will
+still contain no data, but it will be ready for use.
+</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</p></dd><dt><a name="DATASRC_STATIC_FIND"></a><span class="term">DATASRC_STATIC_FIND looking for '%1/%2'</span></dt><dd><p>
+Debug information. This resource record set is being looked up in the static
+data source.
+</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is above the maximum allowed value and has
+been reduced to that value.
+</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
+The string indicating the extended logging level (used by the underlying
+logger implementation code) is not of the stated form.  In particular,
+it starts with DEBUG but does not end with an integer.
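+As an illustration (the values are made up), the first of the following strings
+is of the accepted form while the second is not and would produce this message:
+</p><pre class="screen">
+DEBUG25        accepted: debug level 25
+DEBUGVERBOSE   rejected: does not end with an integer
+</pre><p>
+The debug level selected this way is still subject to the minimum and maximum
+checks described by LOGIMPL_ABOVEDBGMAX and LOGIMPL_BELOWDBGMIN.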
+</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is below the minimum allowed value and has
+been increased to that value.
+</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
+A log console output stream was given that was not recognized. The
+output stream should be one of "stdout", or "stderr"
+</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.  In
+this version of the code, such a condition is regarded as an error and the
+read will be abandoned.
+</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+Indicative of a programming error: when it started up, BIND10 detected that
+the given message ID had been registered more than once.  (All message
+IDs should be unique throughout BIND10.)  This has no impact on the operation
+of the server other than that erroneous messages may be logged.  (When BIND10
+loads the message IDs (and their associated text), if a duplicate ID is found
+it is discarded.  However, when the module that supplied the duplicate ID logs
+that particular message, the text supplied by the module that added the
+original ID will be output - something that may bear no relation to the
+condition being logged.)
+</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+During start-up a local message file was read.  A line with the listed
+message identification was found in the file, but the identification is not
+one contained in the compiled-in message dictionary.  Either the message
+identification has been mis-spelled in the file, or the local file was used
+for an earlier version of the software and the message with that
+identification has been removed.
+</p><p>
+This message may appear a number of times in the file, once for every such
+unknown message identification.
+</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
+The concatenation of the prefix and the message identification is used as
+a symbol in the C++ module; as such it may only contain alphanumeric
+characters and underscores.
+</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+indicates the message compiler found a line in the message file comprising
+just the "%" and nothing else.
+</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+is generated when a line is found in the message file that contains the
+leading "%" and the message identification but no text.
+</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with more than one argument.
+</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument should be a valid C++ namespace.  The reader does a
+cursory check on its validity, checking that the characters in the namespace
+are correct.  The error is generated when the reader finds an invalid
+character. (Valid are alphanumeric characters, underscores and colons.)
+</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with no arguments.
+</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for the
+reason given.
+</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
+The program was not able to open the specified output file for the reason
+given.
+</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+The $PREFIX directive takes a single argument, a prefix to be added to the
+symbol names when a C++ .h file is created.  This error is generated when the
+compiler finds a $PREFIX directive with more than one argument.
+</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+The $PREFIX argument is used in a symbol name in a C++ header file.  As such,
+it must adhere to restrictions on C++ symbol names (e.g. it may only contain
+alphanumeric characters or underscores, and may not start with a digit).
+A $PREFIX directive was found with an argument (given in the message) that
+violates those restrictions.
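+To illustrate the directives and message definition lines described in the
+entries above, a hypothetical message file might contain lines such as the
+following (the namespace, message ID and text are made up; a $PREFIX
+directive, when used, likewise takes a single argument):
+</p><pre class="screen">
+$NAMESPACE isc::example
+% EXAMPLE_STARTED the example module started, reading data from '%1'
+</pre><p>
+Here the definition line starts with "%" and is followed by the message ID and
+the message text, which may contain placeholders such as "%1".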
+</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND10 when it starts to read a
+local message file.  (A local message file may replace the text of one or more
+messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+The specified error was encountered reading from the named message file.
+</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
+</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing to
+the named output file.
+</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for an RR for the
+specified nameserver but received an invalid response.  Either the success
+function was called without a DNS message or the message was invalid in some
+way. (In the latter case, the error should have been picked up elsewhere in
+the processing logic, hence the raising of the error here.)
+</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for the given RR
+type and class, but instead received an answer with the given type and class.
+</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
+A debug message, this is output when a NSAS (nameserver address store -
+part of the resolver) lookup for a zone has been cancelled.
+</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message, this is output when a call is made to the nameserver address
+store (part of the resolver) to obtain the nameservers for the specified zone.
+</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver) is
+making a callback into the resolver to retrieve the address records for the
+specified nameserver.
+</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has been unable to retrieve the specified resource record for the specified
+nameserver.  This is not necessarily a problem - the nameserver may be
+unreachable, in which case the NSAS will try other nameservers in the zone.
+</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has retrieved the given address for the specified nameserver through an
+external query.
+</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the round-trip time (RTT) for a query made to the specified
+nameserver.  The RTT has been updated using the value given and the new RTT is
+displayed.  (The RTT is subject to a calculation that damps out sudden
+changes.  As a result, the new RTT is not necessarily equal to the RTT
+reported.)
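+The exact damping calculation is not specified here; a common approach is an
+exponentially weighted moving average, sketched below as illustrative C++ (the
+weight of 0.7 is an assumption, not taken from the source):
+</p><pre class="screen">
+// Illustrative only: combine the previous RTT with the newly reported one,
+// giving most of the weight to the previous value to damp sudden changes.
+double updateRtt(double old_rtt_ms, double reported_rtt_ms) {
+    const double kOldWeight = 0.7;   // assumed smoothing factor
+    return kOldWeight * old_rtt_ms + (1.0 - kOldWeight) * reported_rtt_ms;
+}
+</pre><p>
+This is why the new RTT shown in the message is generally not equal to the raw
+value reported.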
+</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
+A debug message recording that an answer has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_CNAME"></a><span class="term">RESLIB_CNAME CNAME received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent).  However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+A debug message, this indicates that a response was received for the specified
+query and was categorised as a referral.  However, the received message did
+not contain any NS RRsets.  This may indicate a programming error in the
+response classification code.
+</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+A debug message recording that either a NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question.  Previous debug
+messages will have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1:  %3</span></dt><dd><p>
+A debug message indicating that a protocol error was received.  As there
+are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path.  A SERVFAIL will be returned.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
+</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache.  The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question.  The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple.  The first action will be to look up
+the specified tuple in the cache.  The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer.  The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
+A debug message giving the round-trip time of the last query and response.
+</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
+This is an internal debugging message and is only generated in unit tests.
+It indicates that all upstream queries from the resolver are being routed to
+the specified server, regardless of the address of the nameserver to which
+the query would normally be routed.  As it should never be seen in normal
+operation, it is a warning message instead of a debug message.
+</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+This is a debug message and should only be seen in unit tests.  A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and as
+there are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_TRUNCATED"></a><span class="term">RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</span></dt><dd><p>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP.  There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
+A debug message, the resolver received an AXFR request over TCP.  The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
+A debug message, the resolver received an AXFR request over UDP.  The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error).  The reason for the error, given as a parameter in the message,
+will give more details.
+</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
+A debug message, output when the resolver configuration has been successfully
+loaded.
+</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
+A debug message, the configuration has been updated with the specified
+information.
+</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
+A debug message, output when the Resolver() object has been created.
+</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+A debug message, this contains details of the response sent back to the querying
+system.
+</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
+This is an error message output when an unhandled exception is caught by the
+resolver.  All it can do is to shut down.
+</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
+The received query has passed all checks and is being forwarded to upstream
+servers.
+</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
+A debug message noting that an exception occurred during the processing of
+a received packet.  The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
+The resolver received an IXFR request.  The server cannot process it and will
+return an error message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver received a NOTIFY message.  As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
+The received query has passed all checks and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
+A warning message during startup, indicates that no root addresses have been
+set.  This may be because the resolver will get them from a priming query.
+</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message.  This is a malformed
+message, as a DNS query must contain only one question.  The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes).  It will return a message to the sender
+with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded).  The message parameters give
+a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
+This message is logged when a "print_message" command is received over the
+command channel.
+</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded).  The message parameters give a textual
+description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
+A debug message noting that the resolver is creating a RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
+A debug message indicating that the resolver has received a message.  Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+An error message indicating that the resolver configuration has specified a
+negative retry count.  Only zero or positive values are valid.
+</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
+A debug message, output when the main service object (which handles the
+received queries) is created.
+</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+A debug message listing the parameters associated with the message.  These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers.  Client timeout: the interval allowed to resolve a query
+from a client: after this time, the resolver sends back a SERVFAIL to the
+client whilst continuing to resolve the query. Lookup timeout: the time at
+which the resolver gives up trying to resolve a query.  Retry count: the number
+of times the resolver will retry a query to an upstream server if it gets a
+timeout.
+</p><p>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers.  Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout.  When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process; data received are added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+time out and then drop the query.
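+As an illustration, assume (purely hypothetically) a client timeout of 4000 ms
+and a lookup timeout of 30000 ms:
+</p><pre class="screen">
+t =     0 ms   client query received; resolution starts
+t =  4000 ms   client timeout reached: a SERVFAIL is returned to the client,
+               but resolution continues and received data are cached
+t = 30000 ms   lookup timeout reached: the resolver gives up, waits for
+               outstanding upstream queries to complete or time out, then
+               drops the query
+</pre><p>
+The query timeout and retry count, by contrast, apply to each individual query
+that the resolver sends to an upstream server.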
+</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
+This information message is output when the resolver has shut down.
+</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the resolver starts up.
+</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+</p></dd></dl></div><p>
+    </p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
new file mode 100644
index 0000000..eaa8bb9
--- /dev/null
+++ b/doc/guide/bind10-messages.xml
@@ -0,0 +1,2018 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash  "&#x2014;" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<book>
+  <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+  <bookinfo>
+    <title>BIND 10 Messages Manual</title>
+
+    <copyright>
+      <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+    </copyright>
+
+    <abstract>
+      <para>BIND 10 is a Domain Name System (DNS) suite managed by
+	  Internet Systems Consortium (ISC). It includes DNS libraries
+	  and modular components for controlling authoritative and
+	  recursive DNS servers.
+      </para>
+      <para>
+        This is the messages manual for BIND 10 version &__VERSION__;.
+	    The most up-to-date version of this document, along with
+	    other documents for BIND 10, can be found at
+        <ulink url="http://bind10.isc.org/docs"/>.
+      </para>
+    </abstract>
+
+    <releaseinfo>This is the messages manual for BIND 10 version
+        &__VERSION__;.</releaseinfo>
+  </bookinfo>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+      This document lists each message that can be logged by the
+      programs in the BIND 10 package.  Each entry in this manual
+      is of the form:
+      <screen>IDENTIFICATION message-text</screen>
+      ... where "IDENTIFICATION" is the message identification included
+      in each message logged and "message-text" is the accompanying
+      message text.  The "message-text" may include placeholders of the
+      form "%1", "%2" etc.; these parameters are replaced by relevant
+      values when the message is logged.
+    </para>
+    <para>
+      Each entry is also accompanied by a description giving more
+      information about the circumstances that result in the message
+      being logged.
+    </para>
+    <para>
+      For information on configuring and using BIND 10 logging,
+      refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+    </para>
+  </chapter>
+
+  <chapter id="messages">
+    <title>BIND 10 Messages</title>
+    <para>
+      <variablelist>
+
+<varlistentry id="ASIODNS_FETCHCOMP">
+<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<listitem><para>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_FETCHSTOP">
+<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<listitem><para>
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_OPENSOCK">
+<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_RECVSOCK">
+<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_RECVTMO">
+<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<listitem><para>
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_SENDSOCK">
+<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKORIGIN">
+<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+This message should not appear and indicates an internal error if it does.
+Please enter a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKRESULT">
+<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+The termination method of the resolver's upstream fetch class was called with
+an unknown result code (which is given in the message).  This message should
+not appear and may indicate an internal error.  Please enter a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG">
+<term>CONFIG_CCSESSION_MSG error in CC session message: %1</term>
+<listitem><para>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
+<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
+<listitem><para>
+There was an internal problem handling an incoming message on the
+command and control channel. An unexpected exception was thrown. This
+most likely points to an internal inconsistency in the module code. The
+exception message is appended to the log error, and the module will
+continue to run, but will not send back an answer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_FOPEN_ERR">
+<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_JSON_PARSE">
+<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
+<listitem><para>
+There was a parse error in the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MANAGER_CONFIG">
+<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
+<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<listitem><para>
+The module specification file for this module was rejected by the
+configuration manager. The full error message answer from the
+configuration manager is appended to the log error. The most likely
+cause is that the module is of a different (specification file) version
+than the running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MODULE_SPEC">
+<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file. Please
+verify that the filename is correct and that its contents are a valid
+BIND10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_CREATE">
+<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
+<listitem><para>
+Debug information that the hotspot cache was created at startup.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DESTROY">
+<term>DATASRC_CACHE_DESTROY destroying the hotspot cache</term>
+<listitem><para>
+Debug information. The hotspot cache is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DISABLE">
+<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<listitem><para>
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_ENABLE">
+<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<listitem><para>
+The hotspot cache is enabled from now on.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_EXPIRED">
+<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<listitem><para>
+Debug information. An attempt was made to look up an item in the hotspot
+cache.  The item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FOUND">
+<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
+<listitem><para>
+Debug information. An item was successfully looked up in the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FULL">
+<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<listitem><para>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_INSERT">
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<listitem><para>
+Debug information. A new item is being inserted into the hotspot
+cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_NOT_FOUND">
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<listitem><para>
+Debug information. An attempt was made to look up an item in the hotspot cache,
+but it is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_OLD_FOUND">
+<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<listitem><para>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_REMOVE">
+<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<listitem><para>
+Debug information. An item is being removed from the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_SLOTS">
+<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<listitem><para>
+The maximum allowed number of items in the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. A size of 0
+means no limit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DO_QUERY">
+<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
+<listitem><para>
+Debug information. We're processing some internal query for the given name and
+type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_RRSET">
+<term>DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</term>
+<listitem><para>
+Debug information. An RRset is being added to the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
+<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
+<listitem><para>
+Debug information. Special marks are needed above each '*' in a wildcard name.
+They are being added now for this name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_ZONE">
+<term>DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</term>
+<listitem><para>
+Debug information. A zone is being added into the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ANY_SUCCESS">
+<term>DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</term>
+<listitem><para>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME">
+<term>DATASRC_MEM_CNAME CNAME at the domain '%1'</term>
+<listitem><para>
+Debug information. The requested domain is an alias for a different domain,
+so the CNAME is returned instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_COEXIST">
+<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
+<listitem><para>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to a CNAME.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_TO_NONEMPTY">
+<term>DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</term>
+<listitem><para>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CREATE">
+<term>DATASRC_MEM_CREATE creating zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DELEG_FOUND">
+<term>DATASRC_MEM_DELEG_FOUND delegation found at '%1'</term>
+<listitem><para>
+Debug information. A delegation point was found above the requested record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DESTROY">
+<term>DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A zone from the in-memory data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_ENCOUNTERED">
+<term>DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</term>
+<listitem><para>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain and
+stop the search.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_FOUND">
+<term>DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</term>
+<listitem><para>
+Debug information. A DNAME was found instead of the requested information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_NS">
+<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<listitem><para>
+A request was made to put both DNAME and NS records into the same domain
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3. This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DOMAIN_EMPTY">
+<term>DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</term>
+<listitem><para>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DUP_RRSET">
+<term>DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</term>
+<listitem><para>
+An RRset is being inserted into in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_EXACT_DELEGATION">
+<term>DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</term>
+<listitem><para>
+Debug information. There's an NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND">
+<term>DATASRC_MEM_FIND find '%1/%2'</term>
+<listitem><para>
+Debug information. A search for the requested RRset is being started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND_ZONE">
+<term>DATASRC_MEM_FIND_ZONE looking for zone '%1'</term>
+<listitem><para>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_LOAD">
+<term>DATASRC_MEM_LOAD loading zone '%1' from file '%2'</term>
+<listitem><para>
+Debug information. The content of the master file is being loaded into memory.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NOTFOUND">
+<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<listitem><para>
+Debug information. The requested domain does not exist.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NS_ENCOUNTERED">
+<term>DATASRC_MEM_NS_ENCOUNTERED encountered a NS</term>
+<listitem><para>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NXRRSET">
+<term>DATASRC_MEM_NXRRSET no such type '%1' at '%2'</term>
+<listitem><para>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_OUT_OF_ZONE">
+<term>DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</term>
+<listitem><para>
+An attempt was made to add the domain into a zone that shouldn't have it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_RENAME">
+<term>DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</term>
+<listitem><para>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SINGLETON">
+<term>DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</term>
+<listitem><para>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUCCESS">
+<term>DATASRC_MEM_SUCCESS query for '%1/%2' successful</term>
+<listitem><para>
+Debug information. The requested record was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUPER_STOP">
+<term>DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</term>
+<listitem><para>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as the NXRRSET
+case (e.g. the domain exists, but it doesn't have the requested record type).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SWAP">
+<term>DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</term>
+<listitem><para>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual way of performing a manipulation in an exception-safe
+manner -- the new data are prepared in a different zone object and, once that
+succeeds, the contents are swapped. The original object then holds the new
+data, while the other object now holds the old data and can be safely
+destroyed.
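+The following is a minimal, hypothetical C++ sketch of this pattern (the class
+and member names are illustrative only, not the actual BIND 10 code): the new
+content is built in a scratch object and only swapped into place once it is
+complete.
+<screen>
+#include &lt;vector&gt;
+#include &lt;string&gt;
+
+class ZoneData {
+public:
+    // Build the new content off to the side, then commit it with a swap.
+    void load(const std::vector&lt;std::string&gt;&amp; records) {
+        ZoneData scratch;                            // new data prepared here
+        for (size_t i = 0; i &lt; records.size(); ++i) {
+            scratch.records_.push_back(records[i]);  // may throw; *this untouched
+        }
+        records_.swap(scratch.records_);             // commit: non-throwing swap
+    }                                                // scratch (old data) destroyed here
+private:
+    std::vector&lt;std::string&gt; records_;
+};
+</screen>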
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_CANCEL">
+<term>DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</term>
+<listitem><para>
+Debug information. A domain above the wildcard was reached, but there's something
+below the requested domain. Therefore the wildcard doesn't apply here.  This
+behaviour is specified by RFC 1034, section 4.3.3.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
+<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_NS">
+<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD">
+<term>DATASRC_META_ADD adding a data source into meta data source</term>
+<listitem><para>
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or reconfiguration).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
+<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
+<listitem><para>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_REMOVE">
+<term>DATASRC_META_REMOVE removing data source from meta data source</term>
+<listitem><para>
+Debug information. A data source is being removed from the meta data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC">
+<term>DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</term>
+<listitem><para>
+Debug information. A NSEC record covering this zone is being added.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC3">
+<term>DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</term>
+<listitem><para>
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_RRSET">
+<term>DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</term>
+<listitem><para>
+Debug information. An RRset is being added to the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_SOA">
+<term>DATASRC_QUERY_ADD_SOA adding SOA of '%1'</term>
+<listitem><para>
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_AUTH_FAIL">
+<term>DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_BAD_REFERRAL">
+<term>DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</term>
+<listitem><para>
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CACHED">
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<listitem><para>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
+<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<listitem><para>
+Debug information. While processing a query, a lookup in the hotspot cache
+is being made.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_COPY_AUTH">
+<term>DATASRC_QUERY_COPY_AUTH copying authoritative section into message</term>
+<listitem><para>
+Debug information. The whole referral information is being copied into the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_DELEGATION">
+<term>DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</term>
+<listitem><para>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
+<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<listitem><para>
+There was a CNAME and it was being followed, but it contains no records,
+so there's nowhere to go and there will be no answer. This indicates a problem
+with supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_DNAME">
+<term>DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</term>
+<listitem><para>
+During an attempt to synthesize a CNAME from this DNAME it was discovered that
+the DNAME is empty (it has no records). This indicates a problem with supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FAIL">
+<term>DATASRC_QUERY_FAIL query failed</term>
+<listitem><para>
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
+<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
+<listitem><para>
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_MX_ADDITIONAL">
+<term>DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_NS_ADDITIONAL">
+<term>DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GLUE_FAIL">
+<term>DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_INVALID_OP">
+<term>DATASRC_QUERY_INVALID_OP invalid query operation requested</term>
+<listitem><para>
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_AUTH">
+<term>DATASRC_QUERY_IS_AUTH auth query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is an auth query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_GLUE">
+<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
+<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_REF">
+<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for referral information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_SIMPLE">
+<term>DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a simple query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISPLACED_TASK">
+<term>DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</term>
+<listitem><para>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_NS">
+<term>DATASRC_QUERY_MISSING_NS missing NS records for '%1'</term>
+<listitem><para>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_SOA">
+<term>DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</term>
+<listitem><para>
+The answer should have been a negative one (e.g. indicating the nonexistence
+of something). To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NOGLUE_FAIL">
+<term>DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC">
+<term>DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC3">
+<term>DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC3 record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_ZONE">
+<term>DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</term>
+<listitem><para>
+The lookup of the domain failed because the data contain no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROCESS">
+<term>DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</term>
+<listitem><para>
+Debug information. A query is being processed now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
+<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<listitem><para>
+The user wants DNSSEC and we discovered that the entity doesn't exist (either
+the domain or the record), but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_REF_FAIL">
+<term>DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_RRSIG">
+<term>DATASRC_QUERY_RRSIG unable to answer RRSIG query</term>
+<listitem><para>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SIMPLE_FAIL">
+<term>DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
+<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
+<listitem><para>
+Debug information. While answering a query, a DNAME was encountered. The DNAME
+itself will be returned, and along with it a CNAME will be synthesized for
+clients which don't understand DNAMEs.
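+As a purely illustrative example (the names are hypothetical), if the zone
+contains
+<screen>
+example.org.        DNAME   example.net.
+</screen>
+then a query for www.example.org would cause a CNAME such as
+<screen>
+www.example.org.    CNAME   www.example.net.
+</screen>
+to be synthesized and added to the answer alongside the DNAME itself.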
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TASK_FAIL">
+<term>DATASRC_QUERY_TASK_FAIL task failed with %1</term>
+<listitem><para>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
+<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<listitem><para>
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_UNKNOWN_RESULT">
+<term>DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</term>
+<listitem><para>
+This indicates a programmer error. The answer from the subtask doesn't look like
+anything known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD">
+<term>DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</term>
+<listitem><para>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_FAIL">
+<term>DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</term>
+<listitem><para>
+During an attempt to cover the domain by a wildcard, an error happened. The
+exact kind of error should already have been reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_REFERRAL">
+<term>DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, a referral was encountered, but it wasn't possible
+to get enough information for it.  The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CLOSE">
+<term>DATASRC_SQLITE_CLOSE closing SQLite database</term>
+<listitem><para>
+Debug information. The SQLite data source is closing the database file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CREATE">
+<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_DESTROY">
+<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
+<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<listitem><para>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND">
+<term>DATASRC_SQLITE_FIND looking for RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS">
+<term>DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</term>
+<listitem><para>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT">
+<term>DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREC">
+<term>DATASRC_SQLITE_FINDREC looking for record '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up records of the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF">
+<term>DATASRC_SQLITE_FINDREF looking for referral at '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was trying to identify if there's a referral, but
+it contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_BAD_CLASS">
+<term>DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3">
+<term>DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</term>
+<listitem><para>
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE">
+<term>DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</term>
+<listitem><para>
+The SQLite data source was asked to provide an NSEC3 record for the given zone,
+but it doesn't contain that zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_OPEN">
+<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS">
+<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
+<listitem><para>
+Debug information. We're trying to look up the name preceding the supplied one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
+<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
+<listitem><para>
+The SQLite data source tried to identify the name preceding this one, but this
+name is not contained in any zone in the data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_SETUP">
+<term>DATASRC_SQLITE_SETUP setting up SQLite database</term>
+<listitem><para>
+The database for SQLite data source was found empty. It is assumed this is the
+first run and it is being initialized with the current schema.  It'll still contain
+no data, but it will be ready for use.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_BAD_CLASS">
+<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<listitem><para>
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_CREATE">
+<term>DATASRC_STATIC_CREATE creating the static datasource</term>
+<listitem><para>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_FIND">
+<term>DATASRC_STATIC_FIND looking for '%1/%2'</term>
+<listitem><para>
+Debug information. This resource record set is being looked up in the static
+data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_UNEXPECTED_QUERY_STATE">
+<term>DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</term>
+<listitem><para>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_ABOVEDBGMAX">
+<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<listitem><para>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is above the maximum allowed value and has
+been reduced to that value.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BADDEBUG">
+<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<listitem><para>
+The string indicating the extended logging level (used by the underlying
+logger implementation code) is not of the stated form.  In particular,
+it starts with DEBUG but does not end with an integer.
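+For example (illustrative strings only), a value such as
+<screen>
+DEBUG25
+</screen>
+is of the expected form and would be interpreted as extended debug level 25
+(subject to the minimum and maximum limits described in the neighbouring
+LOGIMPL entries), whereas a value such as
+<screen>
+DEBUGFULL
+</screen>
+starts with DEBUG but does not end with an integer, and so would produce this
+message.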
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BELOWDBGMIN">
+<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<listitem><para>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is below the minimum allowed value and has
+been increased to that value.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADDESTINATION">
+<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<listitem><para>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADSEVERITY">
+<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<listitem><para>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADSTREAM">
+<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<listitem><para>
+A log console output stream was given that was not recognized. The
+output stream should be one of "stdout" or "stderr".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_DUPLNS">
+<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<listitem><para>
+When reading a message file, more than one $NAMESPACE directive was found.  In
+this version of the code, such a condition is regarded as an error and the
+read will be abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_DUPMSGID">
+<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<listitem><para>
+Indicative of a programming error: when it started up, BIND10 detected that
+the given message ID had been registered by one or more modules.  (All message
+IDs should be unique throughout BIND10.)  This has no impact on the operation
+of the server other than that erroneous messages may be logged.  (When BIND10
+loads the message IDs (and their associated text), if a duplicate ID is found
+it is discarded.  However, when the module that supplied the duplicate ID logs
+that particular message, the text supplied by the module that added the original
+ID will be output - something that may bear no relation to the condition being
+logged.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_IDNOTFND">
+<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<listitem><para>
+During start-up a local message file was read.  A line with the listed
+message identification was found in the file, but the identification is not
+one contained in the compiled-in message dictionary.  Either the message
+identification has been mis-spelled in the file, or the local file was used
+for an earlier version of the software and the message with that
+identification has been removed.
+</para><para>
+This message may appear a number of times in the file, once for every such
+unknown message identification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_INVMSGID">
+<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<listitem><para>
+The concatenation of the prefix and the message identification is used as
+a symbol in the C++ module; as such it may only contain alphanumeric
+characters and underscores.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NOMSGID">
+<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+indicates the message compiler found a line in the message file comprising
+just the "%" and nothing else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NOMSGTXT">
+<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Message definition lines are lines starting with a "%".  The rest of the line
+should comprise the message ID and text describing the message.  This error
+is generated when a line is found in the message file that contains the
+leading "%" and the message identification but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSEXTRARG">
+<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with more than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSINVARG">
+<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument should be a valid C++ namespace.  The reader does a
+cursory check on its validity, checking that the characters in the namespace
+are correct.  The error is generated when the reader finds an invalid
+character. (Valid are alphanumeric characters, underscores and colons.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSNOARG">
+<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed.  This error is generated when the
+compiler finds a $NAMESPACE directive with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_OPENIN">
+<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for the
+reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_OPENOUT">
+<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<listitem><para>
+The program was not able to open the specified output file for the reason
+given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_PRFEXTRARG">
+<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<listitem><para>
+The $PREFIX directive takes a single argument, a prefix to be added to the
+symbol names when a C++ .h file is created.  This error is generated when the
+compiler finds a $PREFIX directive with more than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_PRFINVARG">
+<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $PREFIX argument is used in a symbol name in a C++ header file.  As such,
+it must adhere to restrictions on C++ symbol names (e.g. may only contain
+alphanumeric characters or underscores, and may not start with a digit).
+A $PREFIX directive was found with an argument (given in the message) that
+violates those restrictions.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_RDLOCMES">
+<term>MSG_RDLOCMES reading local message file %1</term>
+<listitem><para>
+This is an informational message output by BIND10 when it starts to read a
+local message file.  (A local message file may replace the text of one or more
+messages; the ID of the message will not be changed though.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_READERR">
+<term>MSG_READERR error reading from message file %1: %2</term>
+<listitem><para>
+The specified error was encountered reading from the named message file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_UNRECDIR">
+<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<listitem><para>
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
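+Taken together with the directives and definition lines described in the other
+MSG_ entries, a message file has the following general shape (the identifiers
+below are hypothetical, purely for illustration):
+<screen>
+$NAMESPACE isc::example
+$PREFIX EX_
+% STARTED example module started
+% STOPPED example module stopped
+</screen>
+Here the two lines beginning with "$" are directives and the lines beginning
+with "%" are message definition lines, each comprising a message ID and its
+text; with the $PREFIX shown, the resulting symbols would be EX_STARTED and
+EX_STOPPED.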
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_WRITERR">
+<term>MSG_WRITERR error writing to %1: %2</term>
+<listitem><para>
+The specified error was encountered by the message compiler when writing to
+the named output file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVRESPSTR">
+<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<listitem><para>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for a RR for the
+specified nameserver but received an invalid response.  Either the success
+function was called without a DNS message or the message was invalid in some
+way. (In the latter case, the error should have been picked up elsewhere in
+the processing logic, hence the raising of the error here.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVRESPTC">
+<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver.  The NSAS made a query for the given RR
+type and class, but instead received an answer with the given type and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUPCANCEL">
+<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<listitem><para>
+A debug message, this is output when a NSAS (nameserver address store -
+part of the resolver) lookup for a zone has been cancelled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUPZONE">
+<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<listitem><para>
+A debug message, this is output when a call is made to the nameserver address
+store (part of the resolver) to obtain the nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSADDR">
+<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver) is
+making a callback into the resolver to retrieve the address records for the
+specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSLKUPFAIL">
+<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has been unable to retrieve the specified resource record for the specified
+nameserver.  This is not necessarily a problem - the nameserver may be
+unreachable, in which case the NSAS will try other nameservers in the zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSLKUPSUCC">
+<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has retrieved the given address for the specified nameserver through an
+external query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_SETRTT">
+<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<listitem><para>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the round-trip time (RTT) for a query made to the specified
+nameserver.  The RTT has been updated using the value given and the new RTT is
+displayed.  (The RTT is subject to a calculation that damps out sudden
+changes.  As a result, the new RTT is not necessarily equal to the RTT
+reported.)
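+One common way of damping such changes (given here purely as an illustration;
+the exact formula used by the NSAS may differ) is an exponentially weighted
+moving average of the form
+<screen>
+new_rtt = (1 - weight) * old_rtt + weight * measured_rtt
+</screen>
+where "weight" is a small constant between 0 and 1, so a single unusually slow
+or fast response only moves the stored RTT part of the way towards the new
+measurement.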
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_ANSWER">
+<term>RESLIB_ANSWER answer received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that an answer has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_CNAME">
+<term>RESLIB_CNAME CNAME received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question.  Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_DEEPEST">
+<term>RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</term>
+<listitem><para>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_FOLLOWCNAME">
+<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<listitem><para>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_LONGCHAIN">
+<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent).  However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NONSRRSET">
+<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<listitem><para>
+A debug message, this indicates that a response was received for the specified
+query and was categorised as a referral.  However, the received message did
+not contain any NS RRsets.  This may indicate a programming error in the
+response classification code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NSASLOOK">
+<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<listitem><para>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NXDOMRR">
+<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that either an NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question.  Previous debug
+messages will have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOL">
+<term>RESLIB_PROTOCOL protocol error in answer for %1: %2</term>
+<listitem><para>
+A debug message indicating that a protocol error was received.  As there
+are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOLRTRY">
+<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RCODERR">
+<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<listitem><para>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path.  A SERVFAIL will be returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERZONE">
+<term>RESLIB_REFERZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESCAFND">
+<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache.  The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESCANOTFND">
+<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question.  The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESOLVE">
+<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple.  The first action will be to look up
+the specified tuple in the cache.  The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RRSETFND">
+<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer.  The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RTT">
+<term>RESLIB_RTT round-trip time of last query calculated as %1 ms</term>
+<listitem><para>
+A debug message giving the round-trip time of the last query and response.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNCAFND">
+<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNCALOOK">
+<term>RESLIB_RUNCALOOK looking up <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQUFAIL">
+<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQUSUCC">
+<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TESTSERV">
+<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<listitem><para>
+This message is only generated in unit tests.  It indicates that all upstream
+queries from the resolver are being routed to the specified server, regardless
+of the address of the nameserver to which the query would normally be routed.
+As this should never be seen in normal operation, it is logged as a warning
+rather than as a debug message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TESTUPSTR">
+<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<listitem><para>
+This is a debug message and should only be seen in unit tests.  A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUT">
+<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and as
+there are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUTRTRY">
+<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver.  After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TRUNCATED">
+<term>RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</term>
+<listitem><para>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP.  There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_UPSTREAM">
+<term>RESLIB_UPSTREAM sending upstream query for <%1> to %2</term>
+<listitem><para>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFRTCP">
+<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<listitem><para>
+A debug message, the resolver received an AXFR request over TCP.  The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFRUDP">
+<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<listitem><para>
+A debug message, the resolver received an AXFR request over UDP.  The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CLTMOSMALL">
+<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGCHAN">
+<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<listitem><para>
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGERR">
+<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<listitem><para>
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error).  The reason for the error, given as a parameter in the message,
+will give more details.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGLOAD">
+<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<listitem><para>
+A debug message, output when the resolver configuration has been successfully
+loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGUPD">
+<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<listitem><para>
+A debug message, the configuration has been updated with the specified
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CREATED">
+<term>RESOLVER_CREATED main resolver object created</term>
+<listitem><para>
+A debug message, output when the Resolver() object has been created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNSMSGRCVD">
+<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<listitem><para>
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNSMSGSENT">
+<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<listitem><para>
+A debug message, this contains details of the response sent back to the querying
+system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FAILED">
+<term>RESOLVER_FAILED resolver failed, reason: %1</term>
+<listitem><para>
+This is an error message output when an unhandled exception is caught by the
+resolver.  All it can do is to shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FWDADDR">
+<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FWDQUERY">
+<term>RESOLVER_FWDQUERY processing forward query</term>
+<listitem><para>
+The received query has passed all checks and is being forwarded to upstream
+servers.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_HDRERR">
+<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<listitem><para>
+A debug message noting that an exception occurred during the processing of
+a received packet.  The packet has been dropped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_IXFR">
+<term>RESOLVER_IXFR IXFR request received</term>
+<listitem><para>
+The resolver received an IXFR request.  The server cannot process it
+and will return an error message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_LKTMOSMALL">
+<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NFYNOTAUTH">
+<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<listitem><para>
+The resolver received a NOTIFY message.  As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NORMQUERY">
+<term>RESOLVER_NORMQUERY processing normal query</term>
+<listitem><para>
+The received query has passed all checks and is being processed by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOROOTADDR">
+<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<listitem><para>
+A warning message during startup, indicates that no root addresses have been
+set.  This may be because the resolver will get them from a priming query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTIN">
+<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<listitem><para>
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTONEQUES">
+<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<listitem><para>
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message.  This is a malformed
+message, as a DNS query must contain only one question.  The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_OPCODEUNS">
+<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<listitem><para>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes).  It will return a message to the sender
+with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PARSEERR">
+<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<listitem><para>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded).  The message parameters give
+a textual description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PRINTMSG">
+<term>RESOLVER_PRINTMSG print message command, arguments are: %1</term>
+<listitem><para>
+This message is logged when a "print_message" command is received over the
+command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PROTERR">
+<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<listitem><para>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded).  The message parameters give a textual
+description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUSETUP">
+<term>RESOLVER_QUSETUP query setup</term>
+<listitem><para>
+A debug message noting that the resolver is creating a RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUSHUT">
+<term>RESOLVER_QUSHUT query shutdown</term>
+<listitem><para>
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUTMOSMALL">
+<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<listitem><para>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECVMSG">
+<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<listitem><para>
+A debug message indicating that the resolver has received a message.  Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RETRYNEG">
+<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<listitem><para>
+An error message indicating that the resolver configuration has specified a
+negative retry count.  Only zero or positive values are valid.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_ROOTADDR">
+<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SERVICE">
+<term>RESOLVER_SERVICE service object created</term>
+<listitem><para>
+A debug message, output when the main service object (which handles the
+received queries) is created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SETPARAM">
+<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<listitem><para>
+A debug message listing the parameters associated with the message.  These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers.  Client timeout: the interval allowed for resolving a
+client query; after this time, the resolver sends back a SERVFAIL to the client
+whilst continuing to resolve the query.  Lookup timeout: the time at which the
+resolver gives up trying to resolve a query.  Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+</para><para>
+The client and lookup timeouts require a bit more explanation.  The
+resolution of the client query might require a large number of queries to
+upstream nameservers.  Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout.  When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process.  Data received is added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+time out and then drop the query.
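+</para><para>
+The interplay can be pictured with the following hypothetical C++ sketch
+(illustration only; the names are invented and this is not the resolver's
+actual code):
+</para><para><programlisting>
+// Illustration only: decide what to do as time passes while resolving one
+// client query.  elapsed_ms is the time spent so far on the whole
+// resolution, not on any single upstream query.
+enum Action { KEEP_GOING, SEND_SERVFAIL_BUT_CONTINUE, GIVE_UP };
+
+Action checkTimeouts(long elapsed_ms, long client_timeout_ms,
+                     long lookup_timeout_ms, bool servfail_already_sent)
+{
+    if (elapsed_ms >= lookup_timeout_ms) {
+        return (GIVE_UP);        // lookup timeout: stop resolving entirely
+    }
+    if (elapsed_ms >= client_timeout_ms) {
+        if (!servfail_already_sent) {
+            // client timeout: answer the client with SERVFAIL now, but keep
+            // resolving so that the result can still populate the cache.
+            return (SEND_SERVFAIL_BUT_CONTINUE);
+        }
+    }
+    return (KEEP_GOING);         // keep waiting for upstream answers
+}
+</programlisting>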
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SHUTDOWN">
+<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
+<listitem><para>
+This information message is output when the resolver has shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTED">
+<term>RESOLVER_STARTED resolver started</term>
+<listitem><para>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTING">
+<term>RESOLVER_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the resolver starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNEXRESP">
+<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<listitem><para>
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+</para></listitem>
+</varlistentry>
+      </variablelist>
+    </para>
+  </chapter>
+</book>
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 9c52504..64136c1 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -16,7 +16,8 @@ endif
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
 
-CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES  = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES += auth_messages.h auth_messages.cc
 
 man_MANS = b10-auth.8
 EXTRA_DIST = $(man_MANS) b10-auth.xml
@@ -34,16 +35,25 @@ auth.spec: auth.spec.pre
 spec_config.h: spec_config.h.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
 
-BUILT_SOURCES = spec_config.h
+auth_messages.h auth_messages.cc: auth_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/auth/auth_messages.mes
+
+BUILT_SOURCES = spec_config.h auth_messages.h auth_messages.cc
+
 pkglibexec_PROGRAMS = b10-auth
 b10_auth_SOURCES = query.cc query.h
 b10_auth_SOURCES += auth_srv.cc auth_srv.h
+b10_auth_SOURCES += auth_log.cc auth_log.h
 b10_auth_SOURCES += change_user.cc change_user.h
 b10_auth_SOURCES += auth_config.cc auth_config.h
 b10_auth_SOURCES += command.cc command.h
 b10_auth_SOURCES += common.h common.cc
 b10_auth_SOURCES += statistics.cc statistics.h
 b10_auth_SOURCES += main.cc
+
+nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
+EXTRA_DIST += auth_messages.mes
+
 b10_auth_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
 b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
diff --git a/src/bin/auth/auth_log.cc b/src/bin/auth/auth_log.cc
new file mode 100644
index 0000000..d41eaea
--- /dev/null
+++ b/src/bin/auth/auth_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the top-level component of b10-auth.
+
+#include "auth_log.h"
+
+namespace isc {
+namespace auth {
+
+isc::log::Logger auth_logger("auth");
+
+} // namespace auth
+} // namespace isc
+
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
new file mode 100644
index 0000000..5205624
--- /dev/null
+++ b/src/bin/auth/auth_log.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_LOG__H
+#define __AUTH_LOG__H
+
+#include <log/macros.h>
+#include <auth/auth_messages.h>
+
+namespace isc {
+namespace auth {
+
+/// \brief Auth Logging
+///
+/// Defines the levels used to output debug messages in the "auth" part of
+/// the b10-auth program.  Higher numbers equate to more verbose (and detailed)
+/// output.
+
+// Debug messages indicating normal startup are logged at this debug level.
+const int DBG_AUTH_START = 10;
+
+// Debug level used to log setting information (such as configuration changes).
+const int DBG_AUTH_OPS = 30;
+
+// Trace detailed operations, including errors raised when processing invalid
+// packets.  (These are not logged at severities of WARN or higher for fear
+// that a set of deliberately invalid packets set to the authoritative server
+// could overwhelm the logging.)
+const int DBG_AUTH_DETAIL = 50;
+
+// This level is used to log the contents of packets received and sent.
+const int DBG_AUTH_MESSAGES = 70;
+
+/// Define the logger for the "auth" module part of b10-auth.  We could define
+/// a logger in each file, but we would want to define a common name to avoid
+/// spelling mistakes, so it is just one small step from there to define a
+/// module-common logger.
+extern isc::log::Logger auth_logger;
+
+} // namespace auth
+} // namespace isc
+
+#endif // __AUTH_LOG__H
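(Aside, not part of the patch: a minimal usage sketch showing how the logger and
debug levels declared above combine with the message IDs from the generated
auth_messages.h; the calls mirror ones that appear later in this diff.)

    // Minimal sketch: logging through auth_logger.
    #include <auth/auth_log.h>

    using namespace isc::auth;

    void logSketch(const char* db_file, unsigned interval) {
        // "Operations"-level debug message with one placeholder (%1).
        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file);

        // Same level, numeric placeholder.
        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET).arg(interval);

        // Errors are logged unconditionally, without a debug level.
        LOG_ERROR(auth_logger, AUTH_SERVER_FAILED).arg("example reason");
    }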
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
new file mode 100644
index 0000000..8553d17
--- /dev/null
+++ b/src/bin/auth/auth_messages.mes
@@ -0,0 +1,260 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::auth
+
+% AUTH_AXFR_ERROR error handling AXFR request: %1
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+
+% AUTH_AXFR_UDP AXFR query received over UDP
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+
+% AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+
+% AUTH_CONFIG_CHANNEL_CREATED configuration session channel created
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_STARTED configuration session channel started
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+
+% AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+
+% AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+
+% AUTH_DATA_SOURCE data source database file: %1
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+
+% AUTH_DNS_SERVICES_CREATED DNS services created
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+
+% AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+
+% AUTH_LOAD_TSIG loading TSIG keys
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_LOAD_ZONE loaded zone %1/%2
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+
+% AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+
+% AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+
+% AUTH_NO_STATS_SESSION session interface for statistics is not available
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+
+% AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+
+% AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet with an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+
+% AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+
+% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+
+% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+
+% AUTH_PACKET_RECEIVED message received:\n%1
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_PROCESS_FAIL message processing failure: %1
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+
+The server will return a SERVFAIL error code to the sender of the packet.
+However, this message indicates a potential error in the server.
+Please open a bug ticket for this issue.
+
+% AUTH_RECEIVED_COMMAND command '%1' received
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+
+% AUTH_RECEIVED_SENDSTATS command 'sendstats' received
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+
+% AUTH_RESPONSE_RECEIVED received response message, ignoring
+This is a debug message output if the authoritative server receives
+a DNS packet with the QR bit set, i.e. a DNS response.  The
+server ignores the packet as it only responds to question packets.
+
+% AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SERVER_CREATED server created
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+
+% AUTH_SERVER_FAILED server failed: %1
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+
+% AUTH_SERVER_STARTED server started
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+
+% AUTH_SQLITE3 nothing to do for loading sqlite3
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+
+% AUTH_STATS_CHANNEL_CREATED STATS session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel.  It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_STATS_COMMS communication error in sending statistics data: %1
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+
+% AUTH_STATS_TIMEOUT timeout while sending statistics data: %1
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+
+% AUTH_STATS_TIMER_DISABLED statistics timer has been disabled
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+
+% AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+
+% AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+
+% AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process.  It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+
+% AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process.  It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+
+% AUTH_ZONEMGR_ERROR received error response from zone manager: %1
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+
+
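(Aside, not part of the patch: each "%" entry above is compiled by the log
message compiler into a MessageID constant in the generated auth_messages.h,
and the %1, %2, ... placeholders are filled in order by chained .arg() calls
at the logging site.  The sketch below shows the expected shape of those
generated declarations; it is an assumption about the compiler's output, not
its literal contents.)

    // Hypothetical excerpt of the generated auth_messages.h (shape only).
    namespace isc {
    namespace auth {

    extern const isc::log::MessageID AUTH_AXFR_ERROR;  // "error handling AXFR request: %1"
    extern const isc::log::MessageID AUTH_LOAD_ZONE;   // "loaded zone %1/%2"

    } // namespace auth
    } // namespace isc

    // At a call site the placeholders are supplied in order, for example:
    //     LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
    //               .arg(newzone->getOrigin()).arg(newzone->getClass());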
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 9e01155..f29fd05 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -59,6 +59,7 @@
 #include <auth/auth_srv.h>
 #include <auth/query.h>
 #include <auth/statistics.h>
+#include <auth/auth_log.h>
 
 using namespace std;
 
@@ -104,7 +105,6 @@ public:
 
     /// These members are public because AuthSrv accesses them directly.
     ModuleCCSession* config_session_;
-    bool verbose_mode_;
     AbstractSession* xfrin_session_;
 
     /// In-memory data source.  Currently class IN only for simplicity.
@@ -143,11 +143,11 @@ private:
 
 AuthSrvImpl::AuthSrvImpl(const bool use_cache,
                          AbstractXfroutClient& xfrout_client) :
-    config_session_(NULL), verbose_mode_(false),
+    config_session_(NULL),
     xfrin_session_(NULL),
     memory_datasrc_class_(RRClass::IN()),
     statistics_timer_(io_service_),
-    counters_(verbose_mode_),
+    counters_(),
     keyring_(NULL),
     xfrout_connected_(false),
     xfrout_client_(xfrout_client)
@@ -251,7 +251,7 @@ public:
 
 void
 makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
-                 const Rcode& rcode, const bool verbose_mode,
+                 const Rcode& rcode, 
                  std::auto_ptr<TSIGContext> tsig_context =
                  std::auto_ptr<TSIGContext>())
 {
@@ -289,22 +289,9 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
     } else {
         message->toWire(renderer);
     }
-
-    if (verbose_mode) {
-        cerr << "[b10-auth] sending an error response (" <<
-            renderer.getLength() << " bytes):\n" << message->toText() << endl;
-    }
-}
-}
-
-void
-AuthSrv::setVerbose(const bool on) {
-    impl_->verbose_mode_ = on;
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
+              .arg(renderer.getLength()).arg(message->toText());
 }
-
-bool
-AuthSrv::getVerbose() const {
-    return (impl_->verbose_mode_);
 }
 
 IOService&
@@ -362,15 +349,12 @@ AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
         isc_throw(InvalidParameter,
                   "Memory data source is not supported for RR class "
                   << rrclass);
-    }
-    if (impl_->verbose_mode_) {
-        if (!impl_->memory_datasrc_ && memory_datasrc) {
-            cerr << "[b10-auth] Memory data source is enabled for class "
-                 << rrclass << endl;
-        } else if (impl_->memory_datasrc_ && !memory_datasrc) {
-            cerr << "[b10-auth] Memory data source is disabled for class "
-                 << rrclass << endl;
-        }
+    } else if (!impl_->memory_datasrc_ && memory_datasrc) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
+                  .arg(rrclass);
+    } else if (impl_->memory_datasrc_ && !memory_datasrc) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
+                  .arg(rrclass);
     }
     impl_->memory_datasrc_ = memory_datasrc;
 }
@@ -392,18 +376,13 @@ AuthSrv::setStatisticsTimerInterval(uint32_t interval) {
     }
     if (interval == 0) {
         impl_->statistics_timer_.cancel();
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
     } else {
         impl_->statistics_timer_.setup(boost::bind(&AuthSrv::submitStatistics,
                                                    this),
                                        interval * 1000);
-    }
-    if (impl_->verbose_mode_) {
-        if (interval == 0) {
-            cerr << "[b10-auth] Disabled statistics timer" << endl;
-        } else {
-            cerr << "[b10-auth] Set statistics timer to " << interval
-                 << " seconds" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
+                  .arg(interval);
     }
 }
 
@@ -420,17 +399,13 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
 
         // Ignore all responses.
         if (message->getHeaderFlag(Message::HEADERFLAG_QR)) {
-            if (impl_->verbose_mode_) {
-                cerr << "[b10-auth] received unexpected response, ignoring"
-                     << endl;
-            }
+            LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
             server->resume(false);
             return;
         }
     } catch (const Exception& ex) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] DNS packet exception: " << ex.what() << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_HEADER_PARSE_FAIL)
+                  .arg(ex.what());
         server->resume(false);
         return;
     }
@@ -439,27 +414,21 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
         // Parse the message.
         message->fromWire(request_buffer);
     } catch (const DNSProtocolError& error) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] returning " <<  error.getRcode().toText()
-                 << ": " << error.what() << endl;
-        }
-        makeErrorMessage(message, buffer, error.getRcode(),
-                         impl_->verbose_mode_);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+                  .arg(error.getRcode().toText()).arg(error.what());
+        makeErrorMessage(message, buffer, error.getRcode());
         server->resume(true);
         return;
     } catch (const Exception& ex) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] returning SERVFAIL: " << ex.what() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(),
-                         impl_->verbose_mode_);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+                  .arg(ex.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL());
         server->resume(true);
         return;
     } // other exceptions will be handled at a higher layer.
 
-    if (impl_->verbose_mode_) {
-        cerr << "[b10-auth] received a message:\n" << message->toText() << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_PACKET_RECEIVED)
+              .arg(message->toText());
 
     // Perform further protocol-level validation.
     // TSIG first
@@ -481,20 +450,16 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
 
     bool sendAnswer = true;
     if (tsig_error != TSIGError::NOERROR()) {
-        makeErrorMessage(message, buffer, tsig_error.toRcode(),
-                         impl_->verbose_mode_, tsig_context);
+        makeErrorMessage(message, buffer, tsig_error.toRcode(), tsig_context);
     } else if (message->getOpcode() == Opcode::NOTIFY()) {
         sendAnswer = impl_->processNotify(io_message, message, buffer,
                                           tsig_context);
     } else if (message->getOpcode() != Opcode::QUERY()) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] unsupported opcode" << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::NOTIMP(),
-                         impl_->verbose_mode_, tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE)
+                  .arg(message->getOpcode().toText());
+        makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
     } else if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
-        makeErrorMessage(message, buffer, Rcode::FORMERR(),
-                         impl_->verbose_mode_, tsig_context);
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
     } else {
         ConstQuestionPtr question = *message->beginQuestion();
         const RRType &qtype = question->getType();
@@ -502,8 +467,7 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
             sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
                                                  tsig_context);
         } else if (qtype == RRType::IXFR()) {
-            makeErrorMessage(message, buffer, Rcode::NOTIMP(),
-                             impl_->verbose_mode_, tsig_context);
+            makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
         } else {
             sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
                                                    tsig_context);
@@ -550,11 +514,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
             data_sources_.doQuery(query);
         }
     } catch (const Exception& ex) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] Internal error, returning SERVFAIL: " <<
-                ex.what() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+        LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(ex.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL());
         return (true);
     }
 
@@ -567,12 +528,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
     } else {
         message->toWire(renderer);
     }
-
-    if (verbose_mode_) {
-        cerr << "[b10-auth] sending a response ("
-             << renderer.getLength()
-             << " bytes):\n" << message->toText() << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_NORMAL_RESPONSE)
+              .arg(renderer.getLength()).arg(message->toText());
 
     return (true);
 }
@@ -586,11 +543,8 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
     incCounter(io_message.getSocket().getProtocol());
 
     if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] AXFR query over UDP isn't allowed" << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_UDP);
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
 
@@ -613,12 +567,9 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
             xfrout_connected_ = false;
         }
 
-        if (verbose_mode_) {
-            cerr << "[b10-auth] Error in handling XFR request: " << err.what()
-                 << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+                  .arg(err.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), tsig_context);
         return (true);
     }
 
@@ -633,22 +584,16 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
     // The incoming notify must contain exactly one question for SOA of the
     // zone name.
     if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
-        if (verbose_mode_) {
-                cerr << "[b10-auth] invalid number of questions in notify: "
-                     << message->getRRCount(Message::SECTION_QUESTION) << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_QUESTIONS)
+                  .arg(message->getRRCount(Message::SECTION_QUESTION));
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
     ConstQuestionPtr question = *message->beginQuestion();
     if (question->getType() != RRType::SOA()) {
-        if (verbose_mode_) {
-                cerr << "[b10-auth] invalid question RR type in notify: "
-                     << question->getType() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_RRTYPE)
+                  .arg(question->getType().toText());
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
 
@@ -664,10 +609,7 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
     // silent about such cases, but there doesn't seem to be anything we can
     // improve at the primary server side by sending an error anyway.
     if (xfrin_session_ == NULL) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] "
-                "session interface for xfrin is not available" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
         return (false);
     }
 
@@ -693,16 +635,12 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
         int rcode;
         parsed_answer = parseAnswer(rcode, answer);
         if (rcode != 0) {
-            if (verbose_mode_) {
-                cerr << "[b10-auth] failed to notify Zonemgr: "
-                     << parsed_answer->str() << endl;
-            }
+            LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
+                      .arg(parsed_answer->str());
             return (false);
         }
     } catch (const Exception& ex) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] failed to notify Zonemgr: " << ex.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
         return (false);
     }
 
@@ -762,10 +700,7 @@ AuthSrvImpl::setDbFile(ConstElementPtr config) {
     } else {
         return (answer);
     }
-
-    if (verbose_mode_) {
-        cerr << "[b10-auth] Data source database file: " << db_file_ << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file_);
 
     // create SQL data source
     // Note: the following step is tricky to be exception-safe and to ensure
@@ -795,9 +730,7 @@ AuthSrv::updateConfig(ConstElementPtr new_config) {
         }
         return (impl_->setDbFile(new_config));
     } catch (const isc::Exception& error) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] error: " << error.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_CONFIG_UPDATE_FAIL).arg(error.what());
         return (isc::config::createAnswer(1, error.what()));
     }
 }
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 19c97b5..7eede97 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -124,27 +124,6 @@ public:
                         isc::util::OutputBufferPtr buffer,
                         isc::asiodns::DNSServer* server);
 
-    /// \brief Set verbose flag
-    ///
-    /// \param on The new value of the verbose flag
-
-    /// \brief Enable or disable verbose logging.
-    ///
-    /// This method never throws an exception.
-    ///
-    /// \param on \c true to enable verbose logging; \c false to disable
-    /// verbose logging.
-    void setVerbose(const bool on);
-
-    /// \brief Returns the logging verbosity of the \c AuthSrv object.
-    ///
-    /// This method never throws an exception.
-    ///
-    /// \return \c true if verbose logging is enabled; otherwise \c false.
-
-    /// \brief Get the current value of the verbose flag
-    bool getVerbose() const;
-
     /// \brief Updates the data source for the \c AuthSrv object.
     ///
     /// This method installs or replaces the data source that the \c AuthSrv
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 77d171f..cf3fe4a 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -12,6 +12,9 @@ query_bench_SOURCES += ../query.h  ../query.cc
 query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
 query_bench_SOURCES += ../auth_config.h ../auth_config.cc
 query_bench_SOURCES += ../statistics.h ../statistics.cc
+query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+
+nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
 
 query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
 query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index eafcae8..fe3d729 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -27,16 +27,18 @@
 
 #include <config/ccsession.h>
 
+#include <auth/auth_log.h>
 #include <auth/auth_srv.h>
 #include <auth/command.h>
 
-using namespace std;
-using boost::shared_ptr;
 using boost::scoped_ptr;
-using namespace isc::dns;
+using boost::shared_ptr;
+using namespace isc::auth;
+using namespace isc::config;
 using namespace isc::data;
 using namespace isc::datasrc;
-using namespace isc::config;
+using namespace isc::dns;
+using namespace std;
 
 namespace {
 /// An exception that is thrown if an error occurs while handling a command
@@ -115,9 +117,7 @@ public:
 class SendStatsCommand : public AuthCommand {
 public:
     virtual void exec(AuthSrv& server, isc::data::ConstElementPtr) {
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] command 'sendstats' received" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_SENDSTATS);
         server.submitStatistics();
     }
 };
@@ -140,11 +140,8 @@ public:
                                                       oldzone->getOrigin()));
         newzone->load(oldzone->getFileName());
         oldzone->swap(*newzone);
-
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] Loaded zone '" << newzone->getOrigin()
-                 << "'/" << newzone->getClass() << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
+                  .arg(newzone->getOrigin()).arg(newzone->getClass());
     }
 
 private:
@@ -164,10 +161,7 @@ private:
         ConstElementPtr datasrc_elem = args->get("datasrc");
         if (datasrc_elem) {
             if (datasrc_elem->stringValue() == "sqlite3") {
-                if (server.getVerbose()) {
-                    cerr << "[b10-auth] Nothing to do for loading sqlite3"
-                         << endl;
-                }
+                LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_SQLITE3);
                 return (false);
             } else if (datasrc_elem->stringValue() != "memory") {
                 // (note: at this point it's guaranteed that datasrc_elem
@@ -233,18 +227,13 @@ ConstElementPtr
 execAuthServerCommand(AuthSrv& server, const string& command_id,
                       ConstElementPtr args)
 {
-    if (server.getVerbose()) {
-        cerr << "[b10-auth] Received '" << command_id << "' command" << endl;
-    }
-
+    LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_COMMAND).arg(command_id);
     try {
         scoped_ptr<AuthCommand>(createAuthCommand(command_id))->exec(server,
                                                                      args);
     } catch (const isc::Exception& ex) {
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] Command '" << command_id
-                 << "' execution failed: " << ex.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_COMMAND_FAILED).arg(command_id)
+                                                   .arg(ex.what());
         return (createAnswer(1, ex.what()));
     }
 
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 0324c6e..c8f6762 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,25 +44,26 @@
 #include <auth/command.h>
 #include <auth/change_user.h>
 #include <auth/auth_srv.h>
+#include <auth/auth_log.h>
 #include <asiodns/asiodns.h>
 #include <asiolink/asiolink.h>
-#include <log/dummylog.h>
+#include <log/logger_support.h>
 #include <server_common/keyring.h>
 
 using namespace std;
-using namespace isc::data;
+using namespace isc::asiodns;
+using namespace isc::asiolink;
+using namespace isc::auth;
 using namespace isc::cc;
 using namespace isc::config;
+using namespace isc::data;
 using namespace isc::dns;
+using namespace isc::log;
 using namespace isc::util;
 using namespace isc::xfr;
-using namespace isc::asiolink;
-using namespace isc::asiodns;
 
 namespace {
 
-bool verbose_mode = false;
-
 /* need global var for config/command handlers.
  * todo: turn this around, and put handlers in the authserver
  * class itself? */
@@ -88,6 +89,7 @@ usage() {
     cerr << "\t-v: verbose output" << endl;
     exit(1);
 }
+
 } // end of anonymous namespace
 
 int
@@ -95,6 +97,7 @@ main(int argc, char* argv[]) {
     int ch;
     const char* uid = NULL;
     bool cache = true;
+    bool verbose = false;
 
     while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
         switch (ch) {
@@ -105,8 +108,7 @@ main(int argc, char* argv[]) {
             uid = optarg;
             break;
         case 'v':
-            verbose_mode = true;
-            isc::log::denabled = true;
+            verbose = true;
             break;
         case '?':
         default:
@@ -118,6 +120,11 @@ main(int argc, char* argv[]) {
         usage();
     }
 
+    // Initialize logging.  If verbose, we'll use maximum verbosity.
+    isc::log::initLogger("b10-auth",
+                         (verbose ? isc::log::DEBUG : isc::log::INFO),
+                         isc::log::MAX_DEBUG_LEVEL, NULL);
+
     int ret = 0;
 
     // XXX: we should eventually pass io_service here.
@@ -138,8 +145,7 @@ main(int argc, char* argv[]) {
         }
 
         auth_server = new AuthSrv(cache, xfrout_client);
-        auth_server->setVerbose(verbose_mode);
-        cout << "[b10-auth] Server created." << endl;
+        LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
 
         SimpleCallback* checkin = auth_server->getCheckinProvider();
         IOService& io_service = auth_server->getIOService();
@@ -148,10 +154,10 @@ main(int argc, char* argv[]) {
 
         DNSService dns_service(io_service, checkin, lookup, answer);
         auth_server->setDNSService(dns_service);
-        cout << "[b10-auth] DNSServices created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
 
         cc_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Configuration session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
 
         // We delay starting listening to new commands/config just before we
         // go into the main loop to avoid confusion due to mixture of
@@ -161,19 +167,19 @@ main(int argc, char* argv[]) {
         config_session = new ModuleCCSession(specfile, *cc_session,
                                              my_config_handler,
                                              my_command_handler, false);
-        cout << "[b10-auth] Configuration channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
 
         xfrin_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Xfrin session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
         xfrin_session->establish(NULL);
         xfrin_session_established = true;
-        cout << "[b10-auth] Xfrin session channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
 
         statistics_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Statistics session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_CREATED);
         statistics_session->establish(NULL);
         statistics_session_established = true;
-        cout << "[b10-auth] Statistics session channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_ESTABLISHED);
 
         auth_server->setXfrinSession(xfrin_session);
         auth_server->setStatisticsSession(statistics_session);
@@ -182,33 +188,34 @@ main(int argc, char* argv[]) {
         // all initial configurations, but as a short term workaround we
         // handle the traditional "database_file" setup by directly calling
         // updateConfig().
-        // if server load configure failed, we won't exit, give user second chance
-        // to correct the configure.
+        // If loading the configuration fails, we don't exit; the user gets a
+        // second chance to correct it.
         auth_server->setConfigSession(config_session);
         try {
             configureAuthServer(*auth_server, config_session->getFullConfig());
             auth_server->updateConfig(ElementPtr());
         } catch (const AuthConfigError& ex) {
-            cout << "[bin10-auth] Server load config failed:" << ex.what() << endl;
+            LOG_ERROR(auth_logger, AUTH_CONFIG_LOAD_FAIL).arg(ex.what());
         }
 
         if (uid != NULL) {
             changeUser(uid);
         }
 
-        cout << "[b10-auth] Loading TSIG keys" << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_LOAD_TSIG);
         isc::server_common::initKeyring(*config_session);
         auth_server->setTSIGKeyRing(&isc::server_common::keyring);
 
         // Now start asynchronous read.
         config_session->start();
-        cout << "[b10-auth] Configuration channel started." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_STARTED);
 
-        cout << "[b10-auth] Server started." << endl;
+        // Successfully initialized.
+        LOG_INFO(auth_logger, AUTH_SERVER_STARTED);
         io_service.run();
 
     } catch (const std::exception& ex) {
-        cerr << "[b10-auth] Server failed: " << ex.what() << endl;
+        LOG_FATAL(auth_logger, AUTH_SERVER_FAILED).arg(ex.what());
         ret = 1;
     }
 
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 415aa14..76e5007 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -13,6 +13,7 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <auth/statistics.h>
+#include <auth/auth_log.h>
 
 #include <cc/data.h>
 #include <cc/session.h>
@@ -20,6 +21,8 @@
 #include <sstream>
 #include <iostream>
 
+using namespace isc::auth;
+
 // TODO: We need a namespace ("auth_server"?) to hold
 // AuthSrv and AuthCounters.
 
@@ -29,10 +32,7 @@ private:
     AuthCountersImpl(const AuthCountersImpl& source);
     AuthCountersImpl& operator=(const AuthCountersImpl& source);
 public:
-    // References verbose_mode flag in AuthSrvImpl
-    // TODO: Fix this short term workaround for logging
-    // after we have logging framework
-    AuthCountersImpl(const bool& verbose_mode);
+    AuthCountersImpl();
     ~AuthCountersImpl();
     void inc(const AuthCounters::CounterType type);
     bool submitStatistics() const;
@@ -42,15 +42,13 @@ public:
 private:
     std::vector<uint64_t> counters_;
     isc::cc::AbstractSession* statistics_session_;
-    const bool& verbose_mode_;
 };
 
-AuthCountersImpl::AuthCountersImpl(const bool& verbose_mode) :
+AuthCountersImpl::AuthCountersImpl() :
     // initialize counter
     // size: AuthCounters::COUNTER_TYPES, initial value: 0
     counters_(AuthCounters::COUNTER_TYPES, 0),
-    statistics_session_(NULL),
-    verbose_mode_(verbose_mode)
+    statistics_session_(NULL)
 {}
 
 AuthCountersImpl::~AuthCountersImpl()
@@ -64,11 +62,7 @@ AuthCountersImpl::inc(const AuthCounters::CounterType type) {
 bool
 AuthCountersImpl::submitStatistics() const {
     if (statistics_session_ == NULL) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "session interface for statistics"
-                      << " is not available" << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_NO_STATS_SESSION);
         return (false);
     }
     std::stringstream statistics_string;
@@ -95,18 +89,10 @@ AuthCountersImpl::submitStatistics() const {
         // currently it just returns empty message
         statistics_session_->group_recvmsg(env, answer, false, seq);
     } catch (const isc::cc::SessionError& ex) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "communication error in sending statistics data: "
-                      << ex.what() << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_STATS_COMMS).arg(ex.what());
         return (false);
     } catch (const isc::cc::SessionTimeout& ex) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "timeout happened while sending statistics data: "
-                      << ex.what() << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_STATS_TIMEOUT).arg(ex.what());
         return (false);
     }
     return (true);
@@ -125,8 +111,7 @@ AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
     return (counters_.at(type));
 }
 
-AuthCounters::AuthCounters(const bool& verbose_mode) :
-    impl_(new AuthCountersImpl(verbose_mode))
+AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
 {}
 
 AuthCounters::~AuthCounters() {
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 9e5240e..5bf6436 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -61,15 +61,10 @@ public:
     };
     /// The constructor.
     ///
-    /// \param verbose_mode reference to verbose_mode_ of AuthSrvImpl
-    ///
     /// This constructor is mostly exception free. But it may still throw
     /// a standard exception if memory allocation fails inside the method.
     ///
-    /// \todo Fix this short term workaround for logging
-    /// after we have logging framework.
-    ///
-    AuthCounters(const bool& verbose_mode);
+    AuthCounters();
     /// The destructor.
     ///
     /// This method never throws an exception.
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index a4620f5..71520c2 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -22,6 +22,7 @@ TESTS += run_unittests
 run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
 run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
 run_unittests_SOURCES += ../auth_srv.h ../auth_srv.cc
+run_unittests_SOURCES += ../auth_log.h ../auth_log.cc
 run_unittests_SOURCES += ../query.h ../query.cc
 run_unittests_SOURCES += ../change_user.h ../change_user.cc
 run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
@@ -36,6 +37,9 @@ run_unittests_SOURCES += query_unittest.cc
 run_unittests_SOURCES += change_user_unittest.cc
 run_unittests_SOURCES += statistics_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
+
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 run_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index d922901..2b20d65 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -190,15 +190,6 @@ TEST_F(AuthSrvTest, unsupportedRequest) {
     unsupportedRequest();
 }
 
-// Simple API check
-TEST_F(AuthSrvTest, verbose) {
-    EXPECT_FALSE(server.getVerbose());
-    server.setVerbose(true);
-    EXPECT_TRUE(server.getVerbose());
-    server.setVerbose(false);
-    EXPECT_FALSE(server.getVerbose());
-}
-
 // Multiple questions.  Should result in FORMERR.
 TEST_F(AuthSrvTest, multiQuestion) {
     multiQuestion();
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 062b70d..9a3dded 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -69,13 +69,12 @@ private:
     };
 
 protected:
-    AuthCountersTest() : verbose_mode_(false), counters(verbose_mode_) {
+    AuthCountersTest() : counters() {
         counters.setStatisticsSession(&statistics_session_);
     }
     ~AuthCountersTest() {
     }
     MockSession statistics_session_;
-    bool verbose_mode_;
     AuthCounters counters;
 };
 
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 89301bd..3a36e01 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -30,7 +30,7 @@ export PYTHONPATH
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 34d809a..3d8d57a 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -4,6 +4,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = bind10_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -13,6 +20,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
 	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
 		$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
old mode 100644
new mode 100755
index 730ce1e..8f6ba59
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -23,6 +23,14 @@ BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
 PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
 export PYTHONPATH
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	export @ENV_LIBRARY_PATH@
+fi
+
 B10_FROM_SOURCE=@abs_top_srcdir@
 export B10_FROM_SOURCE
 
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index d2bb90f..891d413 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = bindctl_test.py cmdparse_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin  \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/cfgmgr/plugins/b10logging.py b/src/bin/cfgmgr/plugins/b10logging.py
index 6af3f66..e288c6d 100644
--- a/src/bin/cfgmgr/plugins/b10logging.py
+++ b/src/bin/cfgmgr/plugins/b10logging.py
@@ -48,6 +48,19 @@ def check(config):
         for logger in config['loggers']:
             # name should always be present
             name = logger['name']
+            # report an error if name starts with * but not *.,
+            # or if * is not the first character.
+            # TODO: we might want to also warn or error if the
+            # logger name is not an existing module, but we can't
+            # really tell that from here at this point
+            star_pos = name.find('*')
+            if star_pos > 0 or\
+               name == '*.' or\
+               (star_pos == 0 and len(name) > 1 and name[1] != '.'):
+                errors.append("Bad logger name: '" + name + "': * can "
+                              "only be used instead of the full "
+                              "first-level name, e.g. '*' or "
+                              "'*.subsystem'")
 
             if 'severity' in logger and\
                logger['severity'].lower() not in ALLOWED_SEVERITIES:
@@ -71,11 +84,11 @@ def check(config):
                                'output' in output_option and\
                                output_option['output'] not in ALLOWED_STREAMS:
                                 errors.append("bad output for logger " + name +
-                                              ": " + output_option['stream'] +
+                                              ": " + output_option['output'] +
                                               ", must be stdout or stderr")
                             elif destination == "file" and\
-                                 'output' not in output_option or\
-                                 output_option['output'] == "":
+                                 ('output' not in output_option or\
+                                  output_option['output'] == ""):
                                     errors.append("destination set to file but "
                                                   "output not set to any "
                                                   "filename for logger "
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 896dab7..07b7a85 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -1,5 +1,5 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = tsig_keys_test.py
+PYTESTS = tsig_keys_test.py logging_test.py
 
 EXTRA_DIST = $(PYTESTS)
 
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/cfgmgr/plugins/tests/logging_test.py b/src/bin/cfgmgr/plugins/tests/logging_test.py
new file mode 100644
index 0000000..818a596
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/logging_test.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Make sure we can load the module, put it into path
+import sys
+import os
+sys.path.extend(os.environ["B10_TEST_PLUGIN_DIR"].split(':'))
+
+import b10logging
+import unittest
+
+class LoggingConfCheckTest(unittest.TestCase):
+    def test_load(self):
+        """
+        Checks the entry point returns the correct values.
+        """
+        (spec, check) = b10logging.load()
+        # It returns the checking function
+        self.assertEqual(check, b10logging.check)
+        # The plugin stores its spec
+        self.assertEqual(spec, b10logging.spec)
+
+    def test_logger_conf(self):
+        self.assertEqual(None,
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'DEBUG',
+                                            'debuglevel': 50,
+                                            'output_options':
+                                            [{'destination': 'file',
+                                              'output': '/some/file'
+                                            }]
+                                           },
+                                           {'name': 'b10-resolver',
+                                            'severity': 'WARN',
+                                            'additive': True,
+                                            'output_options':
+                                            [{'destination': 'console',
+                                              'output': 'stderr',
+                                              'flush': True
+                                            }]
+                                           },
+                                           {'name': 'b10-resolver.resolver',
+                                            'severity': 'ERROR',
+                                            'output_options': []
+                                           },
+                                           {'name': '*.cache',
+                                            'severity': 'INFO'
+                                           }
+                                          ]}))
+    def do_bad_name_test(self, name):
+        err_str = "Bad logger name: '" + name + "': * can only be "\
+                  "used instead of the full first-level name, e.g. "\
+                  "'*' or '*.subsystem'"
+        self.assertEqual(err_str,
+                         b10logging.check({'loggers':
+                                          [{'name': name,
+                                            'severity': 'DEBUG'},
+                                          ]}))
+
+    def test_logger_bad_name(self):
+        self.do_bad_name_test("*.")
+        self.do_bad_name_test("*foo")
+        self.do_bad_name_test("*foo.lib")
+        self.do_bad_name_test("foo*")
+        self.do_bad_name_test("foo*.lib")
+
+    def test_logger_bad_severity(self):
+        self.assertEqual('bad severity value for logger *: BADVAL',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'BADVAL'}]}))
+
+    def test_logger_bad_destination(self):
+        self.assertEqual('bad destination for logger *: baddest',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'baddest' }
+                                            ]}]}))
+
+    def test_logger_bad_console_output(self):
+        self.assertEqual('bad output for logger *: bad_output, must be stdout or stderr',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'console',
+                                              'output': 'bad_output'
+                                            }
+                                            ]}]}))
+
+    def test_logger_bad_file_output(self):
+        self.assertEqual('destination set to file but output not set to any filename for logger *',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'file' }
+                                            ]}]}))
+
+    def test_logger_bad_syslog_output(self):
+        self.assertEqual('destination set to syslog but output not set to any facility for logger *',
+                         b10logging.check({'loggers':
+                                          [{'name': '*',
+                                            'severity': 'INFO',
+                                            'output_options': [
+                                            { 'destination': 'syslog' }
+                                            ]}]}))
+
+    def test_logger_bad_type(self):
+        self.assertEqual('123 should be a string',
+                         b10logging.check({'loggers':
+                                          [{'name': 123,
+                                            'severity': 'INFO'}]}))
+        self.assertEqual('123 should be a string',
+                         b10logging.check({'loggers':
+                                          [{'name': 'bind10',
+                                            'severity': 123}]}))
+
+if __name__ == '__main__':
+        unittest.main()
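
For reference, the new test locates the plugin through the B10_TEST_PLUGIN_DIR
environment variable, so running it outside the Makefile harness needs that
variable set first. A minimal sketch (the plugin path shown is only an example
and depends on the source tree layout):

    # Hypothetical standalone run of logging_test.py; the directory below is
    # an assumed location of the cfgmgr plugins in a source checkout.
    import os, subprocess
    os.environ['B10_TEST_PLUGIN_DIR'] = '/path/to/bind10/src/bin/cfgmgr/plugins'
    subprocess.check_call(['python3', 'logging_test.py'])
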
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index f6fdd13..9f80e5d 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = b10-cfgmgr_test.py
 
 EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -13,6 +20,7 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	env TESTDATA_PATH=$(abs_srcdir)/testdata \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 6a4d7d4..e4ec9d4 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = cmdctl_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
 	CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
 	CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
old mode 100644
new mode 100755
index b7ac19f..95de396
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -21,6 +21,14 @@ export PYTHON_EXEC
 PYTHONPATH=@abs_top_builddir@/src/lib/python
 export PYTHONPATH
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	export @ENV_LIBRARY_PATH@
+fi
+
 BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
 export BIND10_MSGQ_SOCKET_FILE
 
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index a90cab2..3507bfa 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,8 +13,15 @@ EXTRA_DIST += ttl2.db
 EXTRA_DIST += ttlext.db
 EXTRA_DIST += example.db
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # TODO: maybe use TESTS?
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 	echo Running test: correct_test.sh 
-	$(SHELL) $(abs_builddir)/correct_test.sh
+	$(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/correct_test.sh
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index bbeec07..87bb1cf 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,8 +12,15 @@ EXTRA_DIST += keyerror3.db
 EXTRA_DIST += originerr1.db
 EXTRA_DIST += originerr2.db
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # TODO: use TESTS ?
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 	echo Running test: error_test.sh
-	$(SHELL) $(abs_builddir)/error_test.sh
+	$(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/error_test.sh
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 0bbb964..50c1e6e 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = msgq_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/resolver/Makefile.am b/src/bin/resolver/Makefile.am
index e826081..bce8307 100644
--- a/src/bin/resolver/Makefile.am
+++ b/src/bin/resolver/Makefile.am
@@ -20,10 +20,10 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
 
 CLEANFILES  = *.gcno *.gcda
 CLEANFILES += resolver.spec spec_config.h
-CLEANFILES += resolverdef.cc resolverdef.h
+CLEANFILES += resolver_messages.cc resolver_messages.h
 
 man_MANS = b10-resolver.8
-EXTRA_DIST = $(man_MANS) b10-resolver.xml resolverdef.mes
+EXTRA_DIST = $(man_MANS) b10-resolver.xml resolver_messages.mes
 
 if ENABLE_MAN
 
@@ -39,11 +39,11 @@ spec_config.h: spec_config.h.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
 
 # Define rule to build logging source files from message file
-resolverdef.h resolverdef.cc: resolverdef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/resolver/resolverdef.mes
+resolver_messages.h resolver_messages.cc: resolver_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/resolver/resolver_messages.mes
 
 
-BUILT_SOURCES = spec_config.h resolverdef.cc resolverdef.h
+BUILT_SOURCES = spec_config.h resolver_messages.cc resolver_messages.h
 
 pkglibexec_PROGRAMS = b10-resolver
 b10_resolver_SOURCES = resolver.cc resolver.h
@@ -53,7 +53,7 @@ b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/change_user.h
 b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/common.h
 b10_resolver_SOURCES += main.cc
 
-nodist_b10_resolver_SOURCES = resolverdef.cc resolverdef.h
+nodist_b10_resolver_SOURCES = resolver_messages.cc resolver_messages.h
 
 
 b10_resolver_LDADD =  $(top_builddir)/src/lib/dns/libdns++.la
diff --git a/src/bin/resolver/main.cc b/src/bin/resolver/main.cc
index 530f689..e7cc4cf 100644
--- a/src/bin/resolver/main.cc
+++ b/src/bin/resolver/main.cc
@@ -80,7 +80,7 @@ my_command_handler(const string& command, ConstElementPtr args) {
     ConstElementPtr answer = createAnswer();
 
     if (command == "print_message") {
-        LOG_INFO(resolver_logger, RESOLVER_PRINTMSG).arg(args);
+        LOG_INFO(resolver_logger, RESOLVER_PRINT_COMMAND).arg(args);
         /* let's add that message to our answer as well */
         answer = createAnswer(0, args);
     } else if (command == "shutdown") {
@@ -203,14 +203,14 @@ main(int argc, char* argv[]) {
         
         DNSService dns_service(io_service, checkin, lookup, answer);
         resolver->setDNSService(dns_service);
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_SERVICE);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_SERVICE_CREATED);
 
         cc_session = new Session(io_service.get_io_service());
         config_session = new ModuleCCSession(specfile, *cc_session,
                                              my_config_handler,
                                              my_command_handler,
                                              true, true);
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIGCHAN);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_CHANNEL);
 
         // FIXME: This does not belong here, but inside Boss
         if (uid != NULL) {
@@ -218,7 +218,7 @@ main(int argc, char* argv[]) {
         }
 
         resolver->setConfigSession(config_session);
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIGLOAD);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_LOADED);
 
         LOG_INFO(resolver_logger, RESOLVER_STARTED);
         io_service.run();
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index 934fbdf..68c1b20 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -83,7 +83,7 @@ public:
                     isc::cache::ResolverCache& cache)
     {
         assert(!rec_query_); // queryShutdown must be called first
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUSETUP);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUERY_SETUP);
         rec_query_ = new RecursiveQuery(dnss, 
                                         nsas, cache,
                                         upstream_,
@@ -99,7 +99,8 @@ public:
         // (this is not a safety check, just to prevent logging of
         // actions that are not performed
         if (rec_query_) {
-            LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUSHUT);
+            LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT,
+                      RESOLVER_QUERY_SHUTDOWN);
             delete rec_query_;
             rec_query_ = NULL;
         }
@@ -112,7 +113,7 @@ public:
         if (dnss) {
             if (!upstream_.empty()) {
                 BOOST_FOREACH(const AddressPair& address, upstream) {
-                    LOG_INFO(resolver_logger, RESOLVER_FWDADDR)
+                    LOG_INFO(resolver_logger, RESOLVER_FORWARD_ADDRESS)
                              .arg(address.first).arg(address.second);
                 }
             } else {
@@ -128,11 +129,11 @@ public:
         if (dnss) {
             if (!upstream_root_.empty()) {
                 BOOST_FOREACH(const AddressPair& address, upstream_root) {
-                    LOG_INFO(resolver_logger, RESOLVER_ROOTADDR)
+                    LOG_INFO(resolver_logger, RESOLVER_SET_ROOT_ADDRESS)
                              .arg(address.first).arg(address.second);
                 }
             } else {
-                LOG_WARN(resolver_logger, RESOLVER_NOROOTADDR);
+                LOG_WARN(resolver_logger, RESOLVER_NO_ROOT_ADDRESS);
             }
         }
     }
@@ -302,7 +303,8 @@ public:
 
         answer_message->toWire(renderer);
 
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL, RESOLVER_DNSMSGSENT)
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+                  RESOLVER_DNS_MESSAGE_SENT)
                   .arg(renderer.getLength()).arg(*answer_message);
     }
 };
@@ -395,13 +397,13 @@ Resolver::processMessage(const IOMessage& io_message,
 
         // Ignore all responses.
         if (query_message->getHeaderFlag(Message::HEADERFLAG_QR)) {
-            LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXRESP);
+            LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXPECTED_RESPONSE);
             server->resume(false);
             return;
         }
 
     } catch (const Exception& ex) {
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HDRERR)
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HEADER_ERROR)
                   .arg(ex.what());
         server->resume(false);
         return;
@@ -411,14 +413,14 @@ Resolver::processMessage(const IOMessage& io_message,
     try {
         query_message->fromWire(request_buffer);
     } catch (const DNSProtocolError& error) {
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTERR)
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTOCOL_ERROR)
                   .arg(error.what()).arg(error.getRcode());
         makeErrorMessage(query_message, answer_message,
                          buffer, error.getRcode());
         server->resume(true);
         return;
     } catch (const Exception& ex) {
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTERR)
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_MESSAGE_ERROR)
                   .arg(ex.what()).arg(Rcode::SERVFAIL());
         makeErrorMessage(query_message, answer_message,
                          buffer, Rcode::SERVFAIL());
@@ -429,8 +431,8 @@ Resolver::processMessage(const IOMessage& io_message,
     // Note:  there appears to be no LOG_DEBUG for a successfully-received
     // message.  This is not an oversight - it is handled below.  In the
     // meantime, output the full message for debug purposes (if requested).
-    LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL, RESOLVER_DNSMSGRCVD)
-              .arg(*query_message);
+    LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+              RESOLVER_DNS_MESSAGE_RECEIVED).arg(*query_message);
 
     // Perform further protocol-level validation.
     bool sendAnswer = true;
@@ -439,20 +441,22 @@ Resolver::processMessage(const IOMessage& io_message,
         makeErrorMessage(query_message, answer_message,
                          buffer, Rcode::NOTAUTH());
         // Notify arrived, but we are not authoritative.
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NFYNOTAUTH);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+                  RESOLVER_NOTIFY_RECEIVED);
 
     } else if (query_message->getOpcode() != Opcode::QUERY()) {
 
         // Unsupported opcode.
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_OPCODEUNS)
-                  .arg(query_message->getOpcode());
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+                  RESOLVER_UNSUPPORTED_OPCODE).arg(query_message->getOpcode());
         makeErrorMessage(query_message, answer_message,
                          buffer, Rcode::NOTIMP());
 
     } else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
 
         // Not one question
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NOTONEQUES)
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+                  RESOLVER_NOT_ONE_QUESTION)
                   .arg(query_message->getRRCount(Message::SECTION_QUESTION));
         makeErrorMessage(query_message, answer_message,
                          buffer, Rcode::FORMERR());
@@ -464,14 +468,14 @@ Resolver::processMessage(const IOMessage& io_message,
 
                 // Can't process AXFR request received over UDP
                 LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
-                          RESOLVER_AXFRUDP);
+                          RESOLVER_AXFR_UDP);
                 makeErrorMessage(query_message, answer_message,
                                  buffer, Rcode::FORMERR());
             } else {
 
                 // ... or over TCP for that matter
                 LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
-                          RESOLVER_AXFRTCP);
+                          RESOLVER_AXFR_TCP);
                 makeErrorMessage(query_message, answer_message,
                                  buffer, Rcode::NOTIMP());
             }
@@ -485,7 +489,7 @@ Resolver::processMessage(const IOMessage& io_message,
         } else if (question->getClass() != RRClass::IN()) {
 
             // Non-IN message received, refuse it.
-            LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NOTIN)
+            LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NON_IN_PACKET)
                       .arg(question->getClass());
             makeErrorMessage(query_message, answer_message,
                              buffer, Rcode::REFUSED());
@@ -518,21 +522,21 @@ ResolverImpl::processNormalQuery(ConstMessagePtr query_message,
 {
     if (upstream_.empty()) {
         // Processing normal query
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMQUERY);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMAL_QUERY);
         ConstQuestionPtr question = *query_message->beginQuestion();
         rec_query_->resolve(*question, answer_message, buffer, server);
 
     } else {
 
         // Processing forward query
-        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_FWDQUERY);
+        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_FORWARD_QUERY);
         rec_query_->forward(query_message, answer_message, buffer, server);
     }
 }
 
 ConstElementPtr
 Resolver::updateConfig(ConstElementPtr config) {
-    LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIGUPD)
+    LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIG_UPDATED)
               .arg(*config);
 
     try {
@@ -560,7 +564,8 @@ Resolver::updateConfig(ConstElementPtr config) {
             // check for us
             qtimeout = qtimeoutE->intValue();
             if (qtimeout < -1) {
-                LOG_ERROR(resolver_logger, RESOLVER_QUTMOSMALL).arg(qtimeout);
+                LOG_ERROR(resolver_logger, RESOLVER_QUERY_TIME_SMALL)
+                          .arg(qtimeout);
                 isc_throw(BadValue, "Query timeout too small");
             }
             set_timeouts = true;
@@ -568,7 +573,8 @@ Resolver::updateConfig(ConstElementPtr config) {
         if (ctimeoutE) {
             ctimeout = ctimeoutE->intValue();
             if (ctimeout < -1) {
-                LOG_ERROR(resolver_logger, RESOLVER_CLTMOSMALL).arg(ctimeout);
+                LOG_ERROR(resolver_logger, RESOLVER_CLIENT_TIME_SMALL)
+                          .arg(ctimeout);
                 isc_throw(BadValue, "Client timeout too small");
             }
             set_timeouts = true;
@@ -576,7 +582,8 @@ Resolver::updateConfig(ConstElementPtr config) {
         if (ltimeoutE) {
             ltimeout = ltimeoutE->intValue();
             if (ltimeout < -1) {
-                LOG_ERROR(resolver_logger, RESOLVER_LKTMOSMALL).arg(ltimeout);
+                LOG_ERROR(resolver_logger, RESOLVER_LOOKUP_TIME_SMALL)
+                          .arg(ltimeout);
                 isc_throw(BadValue, "Lookup timeout too small");
             }
             set_timeouts = true;
@@ -586,7 +593,7 @@ Resolver::updateConfig(ConstElementPtr config) {
             // _after_ the comparison (as opposed to before it for the timeouts)
             // because "retries" is unsigned.
             if (retriesE->intValue() < 0) {
-                LOG_ERROR(resolver_logger, RESOLVER_RETRYNEG)
+                LOG_ERROR(resolver_logger, RESOLVER_NEGATIVE_RETRIES)
                           .arg(retriesE->intValue());
                 isc_throw(BadValue, "Negative number of retries");
             }
@@ -633,7 +640,7 @@ Resolver::updateConfig(ConstElementPtr config) {
     } catch (const isc::Exception& error) {
 
         // Configuration error
-        LOG_ERROR(resolver_logger, RESOLVER_CONFIGERR).arg(error.what());
+        LOG_ERROR(resolver_logger, RESOLVER_CONFIG_ERROR).arg(error.what());
         return (isc::config::createAnswer(1, error.what()));
     }
 }
@@ -673,7 +680,7 @@ Resolver::setListenAddresses(const AddressList& addresses) {
 void
 Resolver::setTimeouts(int query_timeout, int client_timeout,
                       int lookup_timeout, unsigned retries) {
-    LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_SETPARAM)
+    LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_SET_PARAMS)
               .arg(query_timeout).arg(client_timeout).arg(lookup_timeout)
               .arg(retries);
 
diff --git a/src/bin/resolver/resolver_log.h b/src/bin/resolver/resolver_log.h
index 63f6abb..8378b98 100644
--- a/src/bin/resolver/resolver_log.h
+++ b/src/bin/resolver/resolver_log.h
@@ -16,7 +16,7 @@
 #define __RESOLVER_LOG__H
 
 #include <log/macros.h>
-#include "resolverdef.h"
+#include "resolver_messages.h"
 
 /// \brief Resolver Logging
 ///
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
new file mode 100644
index 0000000..4ce0f7d
--- /dev/null
+++ b/src/bin/resolver/resolver_messages.mes
@@ -0,0 +1,198 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# along with the resolver methods.
+
+% RESOLVER_AXFR_TCP AXFR request received over TCP
+A debug message, the resolver received an AXFR request over TCP.  The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+
+% RESOLVER_AXFR_UDP AXFR request received over UDP
+A debug message, the resolver received an AXFR request over UDP.  The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+
+% RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small
+An error indicating that the configuration value specified for the client
+timeout is too small.
+
+% RESOLVER_CONFIG_CHANNEL configuration channel created
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+
+% RESOLVER_CONFIG_ERROR error in configuration: %1
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error).  The reason for the error, given as a parameter in the message,
+will give more details.
+
+% RESOLVER_CONFIG_LOADED configuration loaded
+A debug message, output when the resolver configuration has been successfully
+loaded.
+
+% RESOLVER_CONFIG_UPDATED configuration updated: %1
+A debug message, the configuration has been updated with the specified
+information.
+
+% RESOLVER_CREATED main resolver object created
+A debug message, output when the Resolver() object has been created.
+
+% RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+
+% RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2
+A debug message, this contains details of the response sent back to the querying
+system.
+
+% RESOLVER_FAILED resolver failed, reason: %1
+This is an error message output when an unhandled exception is caught by the
+resolver.  All it can do is to shut down.
+
+% RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+
+% RESOLVER_FORWARD_QUERY processing forward query
+The received query has passed all checks and is being forwarded to upstream
+servers.
+
+% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
+A debug message noting that an exception occurred during the processing of
+a received packet.  The packet has been dropped.
+
+% RESOLVER_IXFR IXFR request received
+The resolver received an IXFR request.  The server cannot process it and will
+return an error message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+
+% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
+A debug message noting that the resolver received a message and the
+parsing of the body of the message failed due to some error (although
+the parsing of the header succeeded).  The message parameters give a
+textual description of the problem and the RCODE returned.
+
+% RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration
+An error message indicating that the resolver configuration has specified a
+negative retry count.  Only zero or positive values are valid.
+
+% RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so it is returning a REFUSED response
+to the sender.
+
+% RESOLVER_NORMAL_QUERY processing normal query
+The received query has passed all checks and is being processed by the resolver.
+
+% RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
+The resolver received a NOTIFY message.  As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+
+% RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message.  This is a malformed
+message, as a DNS query must contain only one question.  The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+
+% RESOLVER_NO_ROOT_ADDRESS no root addresses available
+A warning message during startup, indicates that no root addresses have been
+set.  This may be because the resolver will get them from a priming query.
+
+% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded).  The message parameters give
+a textual description of the problem and the RCODE returned.
+
+% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
+This message is logged when a "print_message" command is received over the
+command channel.
+
+% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded).  The message parameters give a textual
+description of the problem and the RCODE returned.
+
+% RESOLVER_QUERY_SETUP query setup
+A debug message noting that the resolver is creating a RecursiveQuery object.
+
+% RESOLVER_QUERY_SHUTDOWN query shutdown
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+
+% RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small
+An error indicating that the configuration value specified for the query
+timeout is too small.
+
+% RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message
+A debug message indicating that the resolver has received a message.  Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+
+% RESOLVER_RECURSIVE running in recursive mode
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+
+% RESOLVER_SERVICE_CREATED service object created
+A debug message, output when the main service object (which handles the
+received queries) is created.
+
+% RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
+A debug message that lists the parameters being set for the resolver.  These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers.  Client timeout: the interval allowed to resolve a client
+query: after this time, the resolver sends back a SERVFAIL to the client whilst
+continuing to resolve the query. Lookup timeout: the time at which the resolver
+gives up trying to resolve a query.  Retry count: the number of times the
+resolver will retry a query to an upstream server if it gets a timeout.
+
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers.  Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout.  When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process. Data received is added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+timeout and drop the query.
+
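The interaction of these two timeouts can be pictured with a small sketch.  This
is not resolver code; the function and its default values (in milliseconds) are
purely illustrative of the behaviour described above.

    def timeout_action(elapsed_ms, client_timeout_ms=4000, lookup_timeout_ms=30000):
        """Toy model of the client/lookup timeout behaviour described above."""
        if elapsed_ms >= lookup_timeout_ms:
            # Lookup timeout: let pending upstream queries finish or time out,
            # then drop the query altogether.
            return "give up"
        if elapsed_ms >= client_timeout_ms:
            # Client timeout: the client gets SERVFAIL now, but resolution
            # continues so the eventual answer can still populate the cache.
            return "send SERVFAIL, keep resolving"
        return "keep resolving"
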
+% RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+
+% RESOLVER_SHUTDOWN resolver shutdown complete
+This informational message is output when the resolver has shut down.
+
+% RESOLVER_STARTED resolver started
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+
+% RESOLVER_STARTING starting resolver with command line '%1'
+An informational message, this is output when the resolver starts up.
+
+% RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+
+% RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes).  It will return a message to the sender
+with the RCODE set to NOTIMP.
diff --git a/src/bin/resolver/resolverdef.mes b/src/bin/resolver/resolverdef.mes
deleted file mode 100644
index 47433a4..0000000
--- a/src/bin/resolver/resolverdef.mes
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX RESOLVER_
-# No namespace declaration - these constants go in the global namespace
-# along with the resolver methods.
-
-% AXFRTCP       AXFR request received over TCP
-A debug message, the resolver received a NOTIFY message over TCP.  The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
-
-% AXFRUDP       AXFR request received over UDP
-A debug message, the resolver received a NOTIFY message over UDP.  The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
-
-% CONFIGCHAN    configuration channel created
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
-
-% CONFIGERR     error in configuration: %1
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error).  The reason for the error, given as a parameter in the message,
-will give more details.
-
-% CONFIGLOAD    configuration loaded
-A debug message, output when the resolver configuration has been successfully
-loaded.
-
-% CONFIGUPD     configuration updated: %1
-A debug message, the configuration has been updated with the specified
-information.
-
-% DNSMSGRCVD    DNS message received: %1
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
-
-% DNSMSGSENT    DNS message of %1 bytes sent: %2
-A debug message, this contains details of the response sent back to the querying
-system.
-
-% CLTMOSMALL    client timeout of %1 is too small
-An error indicating that the configuration value specified for the query
-timeout is too small.
-
-% CREATED       main resolver object created
-A debug message, output when the Resolver() object has been created.
-
-% FAILED        resolver failed, reason: %1
-This is an error message output when an unhandled exception is caught by the
-resolver.  All it can do is to shut down.
-
-% FWDADDR       setting forward address %1(%2)
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
-
-% FWDQUERY      processing forward query
-The received query has passed all checks and is being forwarded to upstream
-servers.
-
-% HDRERR        message received, exception when processing header: %1
-A debug message noting that an exception occurred during the processing of
-a received packet.  The packet has been dropped.
-
-% IXFR          IXFR request received
-The resolver received a NOTIFY message over TCP.  The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
-
-% LKTMOSMALL    lookup timeout of %1 is too small
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
-
-% NFYNOTAUTH    NOTIFY arrived but server is not authoritative
-The resolver received a NOTIFY message.  As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-
-% NORMQUERY     processing normal query
-The received query has passed all checks and is being processed by the resolver.
-
-% NOTIN         non-IN class request received, returning REFUSED message
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
-
-% NOROOTADDR    no root addresses available
-A warning message during startup, indicates that no root addresses have been
-set.  This may be because the resolver will get them from a priming query.
-
-% NOTONEQUES    query contained %1 questions, exactly one question was expected
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message.  This is a malformed
-message, as a DNS query must contain only one question.  The resolver will
-return a message to the sender with the RCODE set to FORMERR.
-
-% OPCODEUNS     opcode %1 not supported by the resolver
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes).  It will return a message to the sender
-with the RCODE set to NOTIMP.
-
-% PARSEERR      error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded).  The message parameters give
-a textual description of the problem and the RCODE returned.
-
-% PRINTMSG      print message command, arguments are: %1
-This message is logged when a "print_message" command is received over the
-command channel.
-
-% PROTERR       protocol error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded).  The message parameters give a textual
-description of the problem and the RCODE returned.
-
-% QUSETUP       query setup
-A debug message noting that the resolver is creating a RecursiveQuery object.
-
-% QUSHUT        query shutdown
-A debug message noting that the resolver is destroying a RecursiveQuery object.
-
-% QUTMOSMALL    query timeout of %1 is too small
-An error indicating that the configuration value specified for the query
-timeout is too small.
-
-% RECURSIVE     running in recursive mode
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-
-% RECVMSG       resolver has received a DNS message
-A debug message indicating that the resolver has received a message.  Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
-
-% RETRYNEG      negative number of retries (%1) specified in the configuration
-An error message indicating that the resolver configuration has specified a
-negative retry count.  Only zero or positive values are valid.
-
-% ROOTADDR      setting root address %1(%2)
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-
-% SERVICE       service object created
-A debug message, output when the main service object (which handles the
-received queries) is created.
-
-% SETPARAM      query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
-A debug message, lists the parameters associated with the message.  These are:
-query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers.  Client timeout: the interval to resolver a query by
-a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
-resolver gives up trying to resolve a query.  Retry count: the number of times
-the resolver will retry a query to an upstream server if it gets a timeout.
-
-The client and lookup timeouts require a bit more explanation. The
-resolution of the client query might require a large number of queries to
-upstream nameservers.  Even if none of these queries timeout, the total time
-taken to perform all the queries may exceed the client timeout.  When this
-happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache.  However,
-there comes a time - the lookup timeout - when even the resolve gives up.
-At this point it will wait for pending upstream queries to complete or
-timeout and drop the query.
-
-% SHUTDOWN      resolver shutdown complete
-This information message is output when the resolver has shut down.
-
-% STARTED       resolver started
-This informational message is output by the resolver when all initialization
-has been completed and it is entering its main loop.
-
-% STARTING      starting resolver with command line '%1'
-An informational message, this is output when the resolver starts up.
-
-% UNEXRESP      received unexpected response, ignoring
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 35b5398..e0d9fd4 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -28,7 +28,7 @@ run_unittests_SOURCES += resolver_config_unittest.cc
 run_unittests_SOURCES += response_scrubber_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
 
-nodist_run_unittests_SOURCES = ../resolverdef.h ../resolverdef.cc
+nodist_run_unittests_SOURCES = ../resolver_messages.h ../resolver_messages.cc
 
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 4340c64..b5bcea2 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = process_rename_test.py
 # .py will be generated by configure, so we don't have to include it
 # in EXTRA_DIST.
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -12,6 +19,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index 8a29949..0af9be6 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,12 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
 
 b10_xfrindir = $(pkgdatadir)
 b10_xfrin_DATA = xfrin.spec
+pyexec_DATA = xfrin_messages.py
 
-CLEANFILES = b10-xfrin xfrin.pyc 
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
 
 man_MANS = b10-xfrin.8
 EXTRA_DIST = $(man_MANS) b10-xfrin.xml
-EXTRA_DIST += xfrin.spec
+EXTRA_DIST += xfrin.spec xfrin_messages.mes
 
 if ENABLE_MAN
 
@@ -20,8 +21,12 @@ b10-xfrin.8: b10-xfrin.xml
 
 endif
 
+# Define rule to build logging source files from message file
+xfrin_messages.py: xfrin_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py
+b10-xfrin: xfrin.py xfrin_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
 	chmod a+x $@
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index d4efbc7..0f485aa 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	$(LIBRARY_PATH_PLACEHOLDER) \
+	env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index a9ca0f2..64e3563 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -29,12 +29,17 @@ from isc.config.ccsession import *
 from isc.notify import notify_out
 import isc.util.process
 import isc.net.parse
+from xfrin_messages import *
+
+isc.log.init("b10-xfrin")
+logger = isc.log.Logger("xfrin")
+
 try:
     from pydnspp import *
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrin process
     # must keep running, so we warn about it and move forward.
-    sys.stderr.write('[b10-xfrin] failed to import DNS module: %s\n' % str(e))
+    logger.error(XFRIN_IMPORT_DNS, str(e))
 
 isc.util.process.rename()
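The hunk above initialises the new message-based logging and deliberately keeps
the process running if the C++ DNS module cannot be imported.  A minimal,
self-contained sketch of that "optional import, log and continue" pattern, using
the standard logging module in place of isc.log (whose Logger is assumed to
offer a comparable error() call), would look like this:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("xfrin-example")  # hypothetical logger name

    try:
        import pydnspp  # C++ loadable module; may not be installed
    except ImportError as e:
        # Warn and carry on; code paths that need the module must cope later.
        logger.error("failed to import DNS module: %s", e)
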
 
@@ -69,9 +74,6 @@ __version__ = 'BIND10'
 XFRIN_OK = 0
 XFRIN_FAIL = 1
 
-def log_error(msg):
-    sys.stderr.write("[b10-xfrin] %s\n" % str(msg))
-
 class XfrinException(Exception):
     pass
 
@@ -150,8 +152,7 @@ class XfrinConnection(asyncore.dispatcher):
             self.connect(self._master_address)
             return True
         except socket.error as e:
-            self.log_msg('Failed to connect:(%s), %s' % (self._master_address,
-                                                            str(e)))
+            logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
             return False
 
     def _create_query(self, query_type):
@@ -264,31 +265,27 @@ class XfrinConnection(asyncore.dispatcher):
                 logstr = 'SOA check for \'%s\' ' % self._zone_name
                 ret =  self._check_soa_serial()
 
-            logstr = 'transfer of \'%s\': AXFR ' % self._zone_name
             if ret == XFRIN_OK:
-                self.log_msg(logstr + 'started')
+                logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
                 self._send_query(RRType.AXFR())
                 isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
                                             self._handle_xfrin_response)
 
-                self.log_msg(logstr + 'succeeded')
+                logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
 
         except XfrinException as e:
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
             #TODO, recover data source.
         except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
         except UserWarning as e:
             # XXX: this is an exception from our C++ library via the
             # Boost.Python binding.  It would be better to have more more
             # specific exceptions, but at this moment this is the finest
             # granularity.
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
         finally:
            self.close()
@@ -395,11 +392,6 @@ class XfrinConnection(asyncore.dispatcher):
         # Overwrite the log function, log nothing
         pass
 
-    def log_msg(self, msg):
-        if self._verbose:
-            sys.stdout.write('[b10-xfrin] %s\n' % str(msg))
-
-
 def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
                   shutdown_event, master_addrinfo, check_soa, verbose,
                   tsig_key):
@@ -481,8 +473,8 @@ class ZoneInfo:
             try:
                 self.master_addr = isc.net.parse.addr_parse(master_addr_str)
             except ValueError:
+                logger.error(XFRIN_BAD_MASTER_ADDR_FORMAT, master_addr_str)
                 errmsg = "bad format for zone's master: " + master_addr_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_master_port(self, master_port_str):
@@ -496,8 +488,8 @@ class ZoneInfo:
             try:
                 self.master_port = isc.net.parse.port_parse(master_port_str)
             except ValueError:
+                logger.error(XFRIN_BAD_MASTER_PORT_FORMAT, master_port_str)
                 errmsg = "bad format for zone's master port: " + master_port_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_zone_class(self, zone_class_str):
@@ -514,8 +506,8 @@ class ZoneInfo:
             try:
                 self.rrclass = RRClass(zone_class_str)
             except InvalidRRClass:
+                logger.error(XFRIN_BAD_ZONE_CLASS, zone_class_str)
                 errmsg = "invalid zone class: " + zone_class_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_tsig_key(self, tsig_key_str):
@@ -529,8 +521,8 @@ class ZoneInfo:
             try:
                 self.tsig_key = TSIGKey(tsig_key_str)
             except InvalidParameter as ipe:
+                logger.error(XFRIN_BAD_TSIG_KEY_STRING, tsig_key_str)
                 errmsg = "bad TSIG key string: " + tsig_key_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def get_master_addr_info(self):
@@ -556,7 +548,8 @@ class Xfrin:
         self._send_cc_session = isc.cc.Session()
         self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
                                               self.config_handler,
-                                              self.command_handler)
+                                              self.command_handler,
+                                              None, True)
         self._module_cc.start()
         config_data = self._module_cc.get_full_config()
         self.config_handler(config_data)
@@ -635,7 +628,7 @@ class Xfrin:
                 if zone_info is None:
                     # TODO what to do? no info known about zone. defaults?
                     errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
-                    log_error(errmsg)
+                    logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
                     answer = create_answer(1, errmsg)
                 else:
                     master_addr = zone_info.get_master_addr_info()
@@ -670,7 +663,7 @@ class Xfrin:
             else:
                 answer = create_answer(1, 'unknown command: ' + command)
         except XfrinException as err:
-            log_error('error happened for command: %s, %s' % (command, str(err)) )
+            logger.error(XFRIN_COMMAND_ERROR, command, str(err))
             answer = create_answer(1, str(err))
         return answer
 
@@ -762,8 +755,7 @@ class Xfrin:
                 except isc.cc.session.SessionTimeout:
                     pass        # for now we just ignore the failure
             except socket.error as err:
-                log_error("Fail to send message to %s and %s, msgq may has been killed"
-                          % (XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME))
+                logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME)
         else:
             msg = create_command(ZONE_XFRIN_FAILED, param)
             # catch the exception, in case msgq has been killed.
@@ -775,8 +767,7 @@ class Xfrin:
                 except isc.cc.session.SessionTimeout:
                     pass        # for now we just ignore the failure
             except socket.error as err:
-                log_error("Fail to send message to %s, msgq may has been killed"
-                          % ZONE_MANAGER_MODULE_NAME)
+                logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
 
     def startup(self):
         while not self._shutdown_event.is_set():
@@ -844,12 +835,11 @@ def main(xfrin_class, use_signal = True):
         xfrind = xfrin_class(verbose = options.verbose)
         xfrind.startup()
     except KeyboardInterrupt:
-        log_error("exit b10-xfrin")
+        logger.info(XFRIN_STOPPED_BY_KEYBOARD)
     except isc.cc.session.SessionError as e:
-        log_error(str(e))
-        log_error('Error happened! is the command channel daemon running?')
+        logger.error(XFRIN_CC_SESSION_ERROR, str(e))
     except Exception as e:
-        log_error(str(e))
+        logger.error(XFRIN_UNKNOWN_ERROR, str(e))
 
     if xfrind:
         xfrind.shutdown()
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
new file mode 100644
index 0000000..80a0be3
--- /dev/null
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -0,0 +1,91 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+
+% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+
+% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
+The AXFR transfer of the given zone was successfully completed.
+
+% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
+The given master address is not a valid IP address.
+
+% XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1
+The master port as read from the configuration is not a valid port number.
+
+% XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFRIN_BAD_ZONE_CLASS invalid zone class: %1
+The zone class as read from the configuration is not a valid DNS class.
+
+% XFRIN_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFRIN_COMMAND_ERROR error while executing command '%1': %2
+There was an error while the given command was being processed. The
+error is given in the log message.
+
+% XFRIN_CONNECT_MASTER error connecting to master at %1: %2
+There was an error opening a connection to the master. The error is
+shown in the log message.
+
+% XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+
+% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+
+% XFRIN_IMPORT_DNS error importing python DNS module: %1
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+
+% XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+
+% XFRIN_STARTING starting xfrin with command line '%1'
+An informational message, this is output when the xfrin daemon starts up.
+
+% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+
+% XFRIN_UNKNOWN_ERROR unknown error: %1
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index 82d7652..c5492ad 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -6,11 +6,12 @@ pkglibexec_SCRIPTS = b10-xfrout
 
 b10_xfroutdir = $(pkgdatadir)
 b10_xfrout_DATA = xfrout.spec
+pyexec_DATA = xfrout_messages.py
 
-CLEANFILES=	b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES=	b10-xfrout xfrout.pyc xfrout.spec xfrout_messages.py xfrout_messages.pyc
 
 man_MANS = b10-xfrout.8
-EXTRA_DIST = $(man_MANS) b10-xfrout.xml
+EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
 
 if ENABLE_MAN
 
@@ -19,12 +20,15 @@ b10-xfrout.8: b10-xfrout.xml
 
 endif
 
+# Define rule to build logging source files from message file
+xfrout_messages.py: xfrout_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrout/xfrout_messages.mes
 
 xfrout.spec: xfrout.spec.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
 
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py
+b10-xfrout: xfrout.py xfrout_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
 	chmod a+x $@
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 18503e7..6ca2b42 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
+	env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 40bad85..a75ff22 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -26,7 +26,6 @@ from isc.datasrc import sqlite3_ds
 from socketserver import *
 import os
 from isc.config.ccsession import *
-#from isc.log.log import *
 from isc.cc import SessionError, SessionTimeout
 from isc.notify import notify_out
 import isc.util.process
@@ -36,13 +35,18 @@ import errno
 from optparse import OptionParser, OptionValueError
 from isc.util import socketserver_mixin
 
+from xfrout_messages import *
+
+isc.log.init("b10-xfrout")
+logger = isc.log.Logger("xfrout")
+
 try:
     from libutil_io_python import *
     from pydnspp import *
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrout process
     # must keep running, so we warn about it and move forward.
-    sys.stderr.write('[b10-xfrout] failed to import DNS or isc.util.io module: %s\n' % str(e))
+    logger.error(XFROUT_IMPORT, str(e))
 
 isc.util.process.rename()
 
@@ -110,7 +114,7 @@ class XfroutSession():
             self.dns_xfrout_start(self._sock_fd, self._request_data)
             #TODO, avoid catching all exceptions
         except Exception as e:
-            #self._log.log_message("error", str(e))
+            logger.error(XFROUT_HANDLE_QUERY_ERROR, str(e))
             pass
 
         os.close(self._sock_fd)
@@ -138,7 +142,7 @@ class XfroutSession():
             rcode = self._check_request_tsig(msg, mdata)
 
         except Exception as err:
-            #self._log.log_message("error", str(err))
+            logger.error(XFROUT_PARSE_QUERY_ERROR, str(err))
             return Rcode.FORMERR(), None
 
         return rcode, msg
@@ -147,6 +151,9 @@ class XfroutSession():
         question = msg.get_question()[0]
         return question.get_name().to_text()
 
+    def _get_query_zone_class(self, msg):
+        question = msg.get_question()[0]
+        return question.get_class().to_text()
 
     def _send_data(self, sock_fd, data):
         size = len(data)
@@ -243,19 +250,23 @@ class XfroutSession():
             return self._reply_query_with_format_error(msg, sock_fd)
 
         zone_name = self._get_query_zone_name(msg)
+        zone_class_str = self._get_query_zone_class(msg)
+        # TODO: should we not also include class in the check?
         rcode_ = self._check_xfrout_available(zone_name)
+
         if rcode_ != Rcode.NOERROR():
-            #self._log.log_message("info", "transfer of '%s/IN' failed: %s",
-            #                      zone_name, rcode_.to_text())
+            logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
+                        zone_class_str, rcode_.to_text())
             return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
 
         try:
-            #self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
+            logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
             self._reply_xfrout_query(msg, sock_fd, zone_name)
-            #self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
         except Exception as err:
-            #self._log.log_message("error", str(err))
+            logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
+                         zone_class_str, str(err))
             pass
+        logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
 
         self._server.decrease_transfers_counter()
         return
@@ -319,7 +330,7 @@ class XfroutSession():
 
         for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
             if  self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
-                #self._log.log_message("info", "xfrout process is being shutdown")
+                logger.info(XFROUT_STOPPING)
                 return
             # TODO: RRType.SOA() ?
             if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
@@ -396,7 +407,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         try:
             request, client_address = self.get_request()
         except socket.error:
-            #self._log.log_message("error", "Failed to fetch request")
+            logger.error(XFROUT_FETCH_REQUEST_ERROR)
             return
 
         # Check self._shutdown_event to ensure the real shutdown comes.
@@ -410,7 +421,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
                     (rlist, wlist, xlist) = ([], [], [])
                     continue
                 else:
-                    #self._log.log_message("error", "Error with select(): %s" %e)
+                    logger.error(XFROUT_SOCKET_SELECT_ERROR, str(e))
                     break
 
             # self.server._shutdown_event will be set by now, if it is not a false
@@ -420,9 +431,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
 
             try:
                 self.process_request(request)
-            except:
-                #self._log.log_message("error", "Exception happened during processing of %s"
-                #                      % str(client_address))
+            except Exception as pre:
+                logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
                 break
 
     def _handle_request_noblock(self):
@@ -440,8 +450,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             # This may happen when one xfrout process try to connect to
             # xfrout unix socket server, to check whether there is another
             # xfrout running.
-            #if sock_fd == FD_COMM_ERROR:
-                #self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
+            if sock_fd == FD_COMM_ERROR:
+                logger.error(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR)
             return
 
         # receive request msg
@@ -466,8 +476,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         If it's not a socket file or nobody is listening
         , it will be removed. If it can't be removed, exit from python. '''
         if self._sock_file_in_use(sock_file):
-            #self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
-            #                     " is being used by another xfrout process\n" % sock_file)
+            logger.error(XFROUT_UNIX_SOCKET_FILE_IN_USE, sock_file)
             sys.exit(0)
         else:
             if not os.path.exists(sock_file):
@@ -476,7 +485,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             try:
                 os.unlink(sock_file)
             except OSError as err:
-                #self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
+                logger.error(XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR, sock_file, str(err))
                 sys.exit(0)
 
     def _sock_file_in_use(self, sock_file):
@@ -497,18 +506,17 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         try:
             os.unlink(self._sock_file)
         except Exception as e:
-            #self._log.log_message('error', str(e))
+            logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
             pass
 
     def update_config_data(self, new_config):
         '''Apply the new config setting of xfrout module. '''
-        #self._log.log_message('info', 'update config data start.')
+        logger.info(XFROUT_NEW_CONFIG)
         self._lock.acquire()
         self._max_transfers_out = new_config.get('transfers_out')
         self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
-        #self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
         self._lock.release()
-        #self._log.log_message('info', 'update config data complete.')
+        logger.info(XFROUT_NEW_CONFIG_DONE)
 
     def set_tsig_key_ring(self, key_list):
         """Set the tsig_key_ring , given a TSIG key string list representation. """
@@ -523,8 +531,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             try:
                 self.tsig_key_ring.add(TSIGKey(key_item))
             except InvalidParameter as ipe:
-                errmsg = "bad TSIG key string: " + str(key_item)
-                #self._log.log_message('error', '%s' % errmsg)
+                logger.error(XFROUT_BAD_TSIG_KEY_STRING, str(key_item))
 
     def get_db_file(self):
         file, is_default = self._cc.get_remote_config_value("Auth", "database_file")
@@ -559,7 +566,7 @@ class XfroutServer:
         #self._log = None
         self._listen_sock_file = UNIX_SOCKET_FILE
         self._shutdown_event = threading.Event()
-        self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler)
+        self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler, None, True)
         self._config_data = self._cc.get_full_config()
         self._cc.start()
         self._cc.add_remote_config(AUTH_SPECFILE_LOCATION);
@@ -624,7 +631,7 @@ class XfroutServer:
 
     def command_handler(self, cmd, args):
         if cmd == "shutdown":
-            #self._log.log_message("info", "Received shutdown command.")
+            logger.info(XFROUT_RECEIVED_SHUTDOWN_COMMAND)
             self.shutdown()
             answer = create_answer(0)
 
@@ -632,8 +639,7 @@ class XfroutServer:
             zone_name = args.get('zone_name')
             zone_class = args.get('zone_class')
             if zone_name and zone_class:
-                #self._log.log_message("info", "zone '%s/%s': receive notify others command" \
-                #                       % (zone_name, zone_class))
+                logger.info(XFROUT_NOTIFY_COMMAND, zone_name, zone_class)
                 self.send_notify(zone_name, zone_class)
                 answer = create_answer(0)
             else:
@@ -676,15 +682,11 @@ if '__main__' == __name__:
         xfrout_server = XfroutServer()
         xfrout_server.run()
     except KeyboardInterrupt:
-        sys.stderr.write("[b10-xfrout] exit xfrout process\n")
+        logger.info(XFROUT_STOPPED_BY_KEYBOARD)
     except SessionError as e:
-        sys.stderr.write("[b10-xfrout] Error creating xfrout, "
-                           "is the command channel daemon running?\n")
+        logger.error(XFROUT_CC_SESSION_ERROR, str(e))
     except SessionTimeout as e:
-        sys.stderr.write("[b10-xfrout] Error creating xfrout, "
-                           "is the configuration manager running?\n")
-    except ModuleCCSessionError as e:
-        sys.stderr.write("[b10-xfrout] exit xfrout process:%s\n" % str(e))
+        logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
 
     if xfrout_server:
         xfrout_server.shutdown()
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
new file mode 100644
index 0000000..2dada54
--- /dev/null
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -0,0 +1,140 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrout messages python module.
+
+% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
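The rcode selection described above can be mirrored in a short sketch.  The
helper callables and counter arguments here are hypothetical, not the actual
xfrout implementation.

    def transfer_rcode(zone, is_authoritative, has_soa,
                       transfers_in_progress, max_transfers_out):
        # Mirrors the three error cases listed in the message description.
        if not is_authoritative(zone):
            return "NOTAUTH"
        if not has_soa(zone):
            return "SERVFAIL"
        if transfers_in_progress >= max_transfers_out:
            return "REFUSED"  # Xfrout/max_transfers_out limit reached
        return "NOERROR"
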
+% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
+A transfer out of the given zone has started.
+
+% XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFROUT_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+
+% XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+
+% XFROUT_HANDLE_QUERY_ERROR error while handling query: %1
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+
+% XFROUT_IMPORT error importing python module: %1
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+
+% XFROUT_NEW_CONFIG Update xfrout configuration
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+
+% XFROUT_NEW_CONFIG_DONE Update xfrout configuration done
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+
+% XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+
+% XFROUT_PARSE_QUERY_ERROR error parsing query: %1
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+
+% XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+
+% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+
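For reference, passing a file descriptor over a unix domain socket (the
mechanism this message refers to) can be demonstrated with the standard library
alone, as in the sketch below.  BIND 10 itself uses its libutil_io_python
wrapper rather than this code, and socket.recvmsg() requires Python 3.3 or
later.

    import array
    import socket

    def recv_fd(sock):
        """Return one file descriptor received over 'sock', or -1 on failure."""
        fds = array.array("i")
        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(fds.itemsize))
        for level, ctype, data in ancdata:
            if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
                fds.frombytes(data[:fds.itemsize])
                return fds[0]
        return -1
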
+% XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+
+% XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+
+% XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+
+% XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+
+% XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+
+% XFROUT_STOPPING the xfrout daemon is shutting down
+The current transfer is aborted, as the xfrout daemon is shutting down.
+
+% XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+
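The condition behind this message (another process still listening on the unix
socket file) is essentially detected by attempting a connection, roughly as in
this sketch; it is an illustration only, not the xfrout implementation.

    import socket

    def sock_file_in_use(sock_file):
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.connect(sock_file)
        except OSError:
            return False  # nothing listening (or not a socket file): safe to remove
        else:
            return True   # another daemon accepted the connection
        finally:
            s.close()
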
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 496c1a4..97f9b5e 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = zonemgr_test.py
 EXTRA_DIST = $(PYTESTS)
 CLEANFILES = initdb.file
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -12,6 +19,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index b063289..defaf13 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -1,6 +1,28 @@
-SUBDIRS = tests
+SUBDIRS = . tests
 
-EXTRA_DIST = check.h acl.h
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
 
-# TODO: Once we have some cc file we are able to compile, create the library.
-# For now, we have only header files, not creating empty library.
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+# The core library
+lib_LTLIBRARIES = libacl.la
+libacl_la_SOURCES  = acl.h
+libacl_la_SOURCES += check.h
+libacl_la_SOURCES += ip_check.h ip_check.cc
+libacl_la_SOURCES += logic_check.h
+libacl_la_SOURCES += loader.h loader.cc
+
+libacl_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
+
+# DNS specialized one
+lib_LTLIBRARIES += libdnsacl.la
+
+libdnsacl_la_SOURCES = dns.h dns.cc
+
+libdnsacl_la_LIBADD = libacl.la
+libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
+
+CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
new file mode 100644
index 0000000..16f1bf5
--- /dev/null
+++ b/src/lib/acl/dns.cc
@@ -0,0 +1,34 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "dns.h"
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+Loader&
+getLoader() {
+    static Loader* loader(NULL);
+    if (loader == NULL) {
+        loader = new Loader(REJECT);
+        // TODO: This is the place where we register default check creators
+        // like IP check, etc, once we have them.
+    }
+    return (*loader);
+}
+
+}
+}
+}
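
As a minimal sketch of what the singleton contract of getLoader() means in
practice (assuming only what dns.cc above shows, namely that repeated calls
hand back the same Loader object):

    #include <acl/dns.h>
    #include <cassert>

    int main() {
        // Both calls return a reference to the same underlying Loader, so
        // checks registered by one module are visible to every other caller.
        isc::acl::dns::Loader& a = isc::acl::dns::getLoader();
        isc::acl::dns::Loader& b = isc::acl::dns::getLoader();
        assert(&a == &b);
        return 0;
    }
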
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
new file mode 100644
index 0000000..6f36e51
--- /dev/null
+++ b/src/lib/acl/dns.h
@@ -0,0 +1,89 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_DNS_H
+#define ACL_DNS_H
+
+#include "loader.h"
+
+#include <asiolink/io_address.h>
+#include <dns/message.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/**
+ * \brief DNS request to be checked.
+ *
+ * This plays the role of Context of the generic template ACLs (in namespace
+ * isc::acl).
+ *
+ * It is a simple structure holding just a bunch of information. The member
+ * names therefore don't end with the usual trailing underscore; since there
+ * are no methods, they can't be confused with local variables anyway.
+ *
+ * \todo Do we want a constructor to set this in a shorter manner? So we can
+ *     call the ACLs directly?
+ */
+struct RequestContext {
+    /// \brief The DNS message (payload).
+    isc::dns::ConstMessagePtr message;
+    /// \brief The remote IP address (eg. the client).
+    asiolink::IOAddress remote_address;
+    /// \brief The local IP address (ours, of the interface on which we received the request).
+    asiolink::IOAddress local_address;
+    /// \brief The remote port.
+    uint16_t remote_port;
+    /// \brief The local port.
+    uint16_t local_port;
+    /**
+     * \brief Name of the TSIG key the message is signed with.
+     *
+     * This is either the name of the TSIG key the message is signed with,
+     * or an empty string if the message is not signed. We could extract the
+     * information from the message itself, but by the time the ACL is
+     * checked the signature has already been verified, so passing the name
+     * around is probably cheaper.
+     *
+     * It is expected that messages with invalid signatures are handled
+     * before the ACL is applied.
+     */
+    std::string tsig_key_name;
+};
+
+/// \brief DNS based check.
+typedef acl::Check<RequestContext> Check;
+/// \brief DNS based compound check.
+typedef acl::CompoundCheck<RequestContext> CompoundCheck;
+/// \brief DNS based ACL.
+typedef acl::ACL<RequestContext> ACL;
+/// \brief DNS based ACL loader.
+typedef acl::Loader<RequestContext> Loader;
+
+/**
+ * \brief Loader singleton access function.
+ *
+ * This function returns a loader of ACLs. Applications are expected to use
+ * this function instead of creating their own loaders, because one loader is
+ * enough: this one will have the default checks registered and is the
+ * well-known instance, so any plugins can register additional checks with it
+ * as well.
+ */
+Loader& getLoader();
+
+}
+}
+}
+
+#endif
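
To illustrate how the RequestContext and the Check typedef above are meant to
fit together, here is a hedged sketch of a custom DNS check. The
RemotePortCheck class and the port it matches are invented for the example and
are not part of this patch; as with the test checks elsewhere in this branch,
only matches() is overridden.

    #include <acl/dns.h>
    #include <stdint.h>

    // Hypothetical check: matches requests arriving from a given remote port.
    // Only interfaces declared in dns.h above are used.
    class RemotePortCheck : public isc::acl::dns::Check {
    public:
        explicit RemotePortCheck(uint16_t port) : port_(port) {}
        virtual bool matches(const isc::acl::dns::RequestContext& context) const {
            return (context.remote_port == port_);
        }
    private:
        const uint16_t port_;
    };

A server such as b10-auth would fill in a RequestContext for each incoming
query and run it through checks like this one via matches().
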
diff --git a/src/lib/acl/ip_check.cc b/src/lib/acl/ip_check.cc
new file mode 100644
index 0000000..08c8431
--- /dev/null
+++ b/src/lib/acl/ip_check.cc
@@ -0,0 +1,111 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/lexical_cast.hpp>
+
+#include <acl/ip_check.h>
+
+using namespace std;
+
+// Split the IP Address prefix
+
+namespace isc {
+namespace acl {
+namespace internal {
+
+uint8_t
+createMask(size_t prefixlen) {
+
+    if (prefixlen == 0) {
+        return (0);
+
+    } else if (prefixlen <= 8) {
+
+        // In the following discussion:
+        //
+        // w is the width of the data type in bits.
+        // m is the value of prefixlen, the number of most significant bits we
+        // want to set.
+        // ** is exponentiation (i.e. 2**n is 2 raised to the power of n).
+        //
+        // We note that the value of 2**m - 1 gives a value with the least
+        // significant m bits set.  For a data type of width w, this means that
+        // the most significant (w-m) bits are clear.
+        //
+        // Hence the value 2**(w-m) - 1 gives a result with the least significant
+        // w-m bits set and the most significant m bits clear.  The 1's
+        // complement of this value gives the result we want.
+        //
+        // Final note: at this point in the logic, m is non-zero, so w-m < w.
+        // This means 1<<(w-m) will fit into a variable of width w bits.  In
+        // other words, in the expression below, no term will cause an integer
+        // overflow.
+        return (~((1 << (8 - prefixlen)) - 1));
+    }
+
+    // Mask size is too large. (Note that prefixlen is unsigned, so can't be
+    // negative.)
+    isc_throw(isc::OutOfRange, "prefixlen argument must be between 0 and 8");
+}
+
+pair<string, int>
+splitIPAddress(const string& ipprefix) {
+
+    // Split string into its components - an address and a prefix length.
+    // We initialize by assuming that there is no slash in the string given.
+    string address = ipprefix;
+    string prefixlen = "";
+
+    const size_t slashpos = ipprefix.find('/');
+    if ((ipprefix.size() == 0) || (slashpos == 0) ||
+        (slashpos == (ipprefix.size() - 1))) {
+        // Nothing in prefix, or it starts with or ends with a slash.
+        isc_throw(isc::InvalidParameter, "address prefix of " << ipprefix <<
+                                         " is not valid");
+
+    } else if (slashpos != string::npos) {
+        // There is a slash somewhere in the string, split the string on it.
+        // Don't worry about multiple slashes - if there are some, they will
+        // appear in the prefixlen segment and will be detected when an attempt
+        // is made to convert it to a number.
+        address = ipprefix.substr(0, slashpos);
+        prefixlen = ipprefix.substr(slashpos + 1);
+    }
+
+    // Set the default value for the prefix length.  As the type of the address
+    // is not known at the point this function is called, the maximum
+    // allowable value is also not known, so -1 is used to mean "no prefix
+    // given".  (The value of 0 is reserved for a "match any address" match.)
+    int prefix_size = -1;
+
+    // If there is a prefixlength, attempt to convert it.
+    if (!prefixlen.empty()) {
+        try {
+            prefix_size = boost::lexical_cast<int>(prefixlen);
+            if (prefix_size < 0) {
+                isc_throw(isc::InvalidParameter, "address prefix of " <<
+                          ipprefix << " is not valid");
+            }
+        } catch (boost::bad_lexical_cast&) {
+            isc_throw(isc::InvalidParameter, "prefix length of '" <<
+                      prefixlen << "' is not valid");
+        }
+    }
+
+    return (make_pair(address, prefix_size));
+}
+
+} // namespace internal
+} // namespace acl
+} // namespace isc
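
To make the behaviour of the two helpers concrete, the following sketch shows
the results they produce for a few inputs; it assumes nothing beyond the code
above.

    #include <acl/ip_check.h>
    #include <cassert>
    #include <string>
    #include <utility>

    int main() {
        using namespace isc::acl::internal;

        // createMask(3) sets the three most significant bits: 11100000.
        assert(createMask(3) == 0xe0);
        assert(createMask(0) == 0x00);
        assert(createMask(8) == 0xff);

        // splitIPAddress() separates the address text from the prefix length;
        // -1 signals that no "/n" part was given.
        std::pair<std::string, int> r = splitIPAddress("192.0.2.0/24");
        assert(r.first == "192.0.2.0" && r.second == 24);
        r = splitIPAddress("2001:db8::1");
        assert(r.first == "2001:db8::1" && r.second == -1);
        return 0;
    }
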
diff --git a/src/lib/acl/ip_check.h b/src/lib/acl/ip_check.h
new file mode 100644
index 0000000..5bc70fc
--- /dev/null
+++ b/src/lib/acl/ip_check.h
@@ -0,0 +1,354 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IP_CHECK_H
+#define __IP_CHECK_H
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include <boost/static_assert.hpp>
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sys/socket.h> // for AF_INET/AF_INET6
+#include <netinet/in.h>
+
+#include <acl/check.h>
+#include <exceptions/exceptions.h>
+#include <util/strutil.h>
+
+namespace isc {
+namespace acl {
+
+// Free functions.  These are not supposed to be used outside this module,
+// but are declared public for testing.  To try to conceal them, they are
+// put in an "internal" namespace.
+
+namespace internal {
+
+/// \brief Convert prefix length to mask
+///
+/// Given a prefix length and a data type, return a value of that data type
+/// with the most significant "prefix length" bits set.  For example, if the
+/// data type is an uint8_t and the prefix length is 3, the function would
+/// return a uint8_t holding the binary value 11100000.  This value is used as
+/// a mask in the address checks.
+///
+/// \param prefixlen number of bits to be set in the mask.  This must be
+///        between 0 and 8.
+///
+/// \return uint8_t with the most significant "prefixlen" bits set.
+///
+/// \exception OutOfRange prefixlen is too large for the data type.
+
+uint8_t createMask(size_t prefixlen);
+
+/// \brief Split IP Address Prefix
+///
+/// Splits an IP address prefix (given in the form "xxxxxx/n" or "xxxxx") into
+/// a string representing the IP address and a number giving the length of the
+/// prefix. (In the latter case, the prefix length is taken to be equal to the
+/// width in bits of the data type holding the address.) An exception will be
+/// thrown if the string format is invalid or if the prefix length is invalid.
+///
+/// N.B. This function does NOT check that the address component is a valid IP
+/// address; this is done elsewhere in the address parsing process.
+///
+/// \param ipprefix Address or address prefix.  The string should be passed
+///                 without leading or trailing spaces.
+///
+/// \return Pair of (string, int) holding the address string and the prefix
+///         length.  The second element is -1 if no prefix was given.
+///
+/// \exception InvalidParameter Address prefix not of the expected syntax
+
+std::pair<std::string, int>
+splitIPAddress(const std::string& ipprefix);
+
+} // namespace internal
+
+
+
+/// \brief IP Check
+///
+/// This class performs a match between an IP address prefix specified in an ACL
+/// and a given IP address.  The check works for both IPv4 and IPv6 addresses.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+
+template <typename Context>
+class IPCheck : public Check<Context> {
+private:
+    // Size of uint8_t array needed to hold different address types
+    static const size_t IPV6_SIZE = sizeof(struct in6_addr);
+    static const size_t IPV4_SIZE = sizeof(struct in_addr);
+
+    // Confirm our assumption of relative sizes - this allows us to assume that
+    // an array sized for an IPv6 address can hold an IPv4 address.
+    BOOST_STATIC_ASSERT(sizeof(struct in6_addr) > sizeof(struct in_addr));
+
+public:
+    /// \brief String Constructor
+    ///
+    /// Constructs an IP Check object from an address or address prefix in the
+    /// form <ip-address>/n".
+    ///
+    /// Also allowed are the special keywords "any4" and "any6", which match
+    /// any IPv4 or IPv6 address.  These must be specified in lowercase.
+    ///
+    /// \param ipprefix IP address prefix in the form "<ip-address>/n"
+    ///        (where the "/n" part is optional and should be valid for the
+    ///        address).  If "n" is specified as zero, the match is for any
+    ///        address in that address family.  The address can also be
+    ///        given as "any4" or "any6".
+    IPCheck(const std::string& ipprefix) : family_(0) {
+
+        // Ensure array elements are correctly initialized with zeroes.
+        std::fill(address_, address_ + IPV6_SIZE, 0);
+        std::fill(mask_, mask_ + IPV6_SIZE, 0);
+
+        // Only deal with the string after we've removed leading and trailing
+        // spaces.
+        const std::string mod_prefix = isc::util::str::trim(ipprefix);
+
+        // Check for special cases first.
+        if (mod_prefix == "any4") {
+            family_ = AF_INET;
+
+        } else if (mod_prefix == "any6") {
+            family_ = AF_INET6;
+
+        } else {
+
+            // General address prefix.  Split into address part and prefix
+            // length.
+            const std::pair<std::string, int> result =
+                internal::splitIPAddress(mod_prefix);
+
+            // Try to convert the address.  If successful, the result is in
+            // network-byte order (most significant components at lower
+            // addresses).
+            int status = inet_pton(AF_INET6, result.first.c_str(), address_);
+            if (status == 1) {
+                // It was an IPv6 address.
+                family_ = AF_INET6;
+            } else {
+                // IPv6 interpretation failed, try IPv4.
+                status = inet_pton(AF_INET, result.first.c_str(), address_);
+                if (status == 1) {
+                    family_ = AF_INET;
+                }
+            }
+
+            // Handle errors.
+            if (status == 0) {
+                isc_throw(isc::InvalidParameter, "address prefix of " <<
+                          ipprefix << " is not valid");
+            } else if (status < 0) {
+                isc_throw(isc::Unexpected, "address conversion of " <<
+                          ipprefix << " failed due to a system error");
+            }
+
+            // All done, so set the mask used in the address comparison.
+            setMask(result.second);
+        }
+    }
+
+    /// \brief Destructor
+    virtual ~IPCheck() {}
+
+    /// \brief The check itself
+    ///
+    /// Matches the passed argument to the condition stored here.  Different
+    /// specialisations must be provided for different argument types, and the
+    /// program will fail to compile if a required specialisation is not
+    /// provided.
+    ///
+    /// It is expected that matches() will extract the address information from
+    /// the Context structure, and use compare() to actually perform the
+    /// comparison.
+    ///
+    /// \param context Information to be matched
+    virtual bool matches(const Context& context) const;
+
+    /// \brief Estimated cost
+    ///
+    /// Assume that the cost of the match is linear and depends on the
+    /// maximum number of comparison operations.
+    ///
+    /// \return Estimated cost of the comparison
+    virtual unsigned cost() const {
+        return ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+    }
+
+    ///@{
+    /// Access methods - mainly for testing
+
+    /// \return Stored IP address
+    std::vector<uint8_t> getAddress() const {
+        const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+        return (std::vector<uint8_t>(address_, address_ + vector_len));
+    }
+
+    /// \return Network mask applied to match
+    std::vector<uint8_t> getMask() const {
+        const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+        return (std::vector<uint8_t>(mask_, mask_ + vector_len));
+    }
+
+    /// \return Prefix length of the match
+    size_t getPrefixlen() const {
+        // Work this out by counting bits in the mask.
+        size_t count = 0;
+        for (size_t i = 0; i < IPV6_SIZE; ++i) {
+            if (mask_[i] == 0xff) {
+                // All bits set in this byte
+                count += 8;
+                continue;
+
+            } else if (mask_[i] != 0) {
+                // Only some bits set in this byte.  Count them.
+                uint8_t byte = mask_[i];
+                for (int j = 0; j < 8; ++j) {
+                    count += byte & 0x01;   // Add one if the bit is set
+                    byte >>= 1;             // Go for next bit
+                }
+            }
+            break;
+        }
+        return (count);
+    }
+
+    /// \return Address family
+    int getFamily() const {
+        return (family_);
+    }
+    ///@}
+
+protected:
+    /// \brief Comparison
+    ///
+    /// This is the actual comparison function that checks the IP address passed
+    /// to this class with the matching information in the class itself.  It is
+    /// expected to be called from matches().
+    ///
+    /// \param testaddr Address (in network byte order) to test against the
+    ///                 check condition in the class.  This is expected to
+    ///                 be IPV6_SIZE or IPV4_SIZE bytes long.
+    /// \param family   Address family of testaddr.
+    ///
+    /// \return true if the address matches, false if it does not.
+    virtual bool compare(const uint8_t* testaddr, int family) const {
+
+        if (family != family_) {
+            // Can't match if the address is of the wrong family
+            return (false);
+        }
+
+        // The simple family check has passed, so a full comparison is needed.  To check that
+        // the address given matches the stored network address and mask, we
+        // check the simple condition that:
+        //
+        //     address_given & mask_ == stored_address & mask_
+        //
+        // The result is checked for all bytes for which there are bits set in
+        // the mask.  We stop at the first non-match (or when we run out of bits
+        // in the mask).
+        //
+        // Note that the mask represents a contiguous set of bits.  As such, as
+        // soon as we find a mask byte of zeroes, we have run past the part of
+        // the address where we need to match.
+        //
+        // Note also that when checking an IPv4 address, the constructor has
+        // set all bytes in the mask beyond the first four bytes to zero.
+        // As the loop stops when it encounters a zero mask byte, if the
+        // ACL is for an IPV4 address, the loop will never check more than four
+        // bytes.
+
+        bool match = true;
+        for (int i = 0; match && (i < IPV6_SIZE) && (mask_[i] != 0); ++i) {
+             match = ((testaddr[i] & mask_[i]) == (address_[i] & mask_[i]));
+        }
+        return (match);
+    }
+
+private:
+    /// \brief Set Mask
+    ///
+    /// Sets up the mask from the prefix length.  This involves setting
+    /// an individual mask in each byte of the mask array.
+    ///
+    /// The actual allowed value of the prefix length depends on the address
+    /// family.
+    ///
+    /// \param requested Requested prefix length size.  If negative, the
+    ///        maximum for the address family is assumed.  (A negative value
+    ///        will arise if the string constructor was used and no mask size
+    ///        was given.)
+    void setMask(int requested) {
+
+        // Set the maximum number of bits allowed in the mask, and request
+        // that number of bits if no prefix length was given in the constructor.
+        const int maxmask = 8 * ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+        if (requested < 0) {
+            requested = maxmask;
+        }
+
+        // Validate that the mask is valid.
+        if (requested <= maxmask) {
+
+            // Loop, setting the bits in the set of mask bytes until all the
+            // specified bits have been used up.  As both IPv4 and IPv6
+            // addresses are stored in network-byte order, this works in
+            // both cases.
+            size_t bits_left = requested;   // Bits remaining to set
+            int i = -1;
+            while (bits_left > 0) {
+                if (bits_left >= 8) {
+                    mask_[++i] = ~0;  // All bits set
+                    bits_left -= 8;
+
+                } else if (bits_left > 0) {
+                    mask_[++i] = internal::createMask(bits_left);
+                    bits_left = 0;
+                }
+            }
+        } else {
+            isc_throw(isc::OutOfRange,
+                      "mask size of " << requested << " is invalid " <<
+                      "for the given address family");
+        }
+    }
+
+    // Member variables.
+    uint8_t address_[IPV6_SIZE];  ///< Address in binary form
+    uint8_t mask_[IPV6_SIZE];     ///< Address mask
+    int     family_;              ///< Address family
+};
+
+// Some compilers seem to need this to be explicitly defined outside the class
+template <typename Context>
+const size_t IPCheck<Context>::IPV6_SIZE;
+
+template <typename Context>
+const size_t IPCheck<Context>::IPV4_SIZE;
+
+} // namespace acl
+} // namespace isc
+
+#endif // __IP_CHECK_H
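
Since matches() above is deliberately left to a per-context template
specialisation, the following sketch shows one way an application could wire
it up. The ClientAddress context type is invented for the example and is not
part of this patch; only the IPCheck interface above is assumed.

    #include <acl/ip_check.h>
    #include <arpa/inet.h>
    #include <cassert>
    #include <cstring>

    // Hypothetical context carrying the peer address in network byte order.
    struct ClientAddress {
        uint8_t addr[16];   // 4 bytes used for IPv4, 16 for IPv6
        int family;         // AF_INET or AF_INET6
    };

    namespace isc {
    namespace acl {
    // The required specialisation: extract the data from the context and
    // delegate to the protected compare() helper.
    template <>
    bool IPCheck<ClientAddress>::matches(const ClientAddress& context) const {
        return (compare(context.addr, context.family));
    }
    }
    }

    int main() {
        isc::acl::IPCheck<ClientAddress> check("192.0.2.0/24");

        ClientAddress client;
        std::memset(client.addr, 0, sizeof(client.addr));
        client.family = AF_INET;

        inet_pton(AF_INET, "192.0.2.5", client.addr);
        assert(check.matches(client));       // inside the /24 prefix

        inet_pton(AF_INET, "198.51.100.5", client.addr);
        assert(!check.matches(client));      // outside the prefix
        return 0;
    }
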
diff --git a/src/lib/acl/loader.cc b/src/lib/acl/loader.cc
new file mode 100644
index 0000000..8ca7e28
--- /dev/null
+++ b/src/lib/acl/loader.cc
@@ -0,0 +1,46 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "loader.h"
+
+using namespace std;
+
+namespace isc {
+namespace acl {
+
+BasicAction defaultActionLoader(data::ConstElementPtr actionEl) {
+    try {
+        const string action(actionEl->stringValue());
+        if (action == "ACCEPT") {
+            return (ACCEPT);
+        } else if (action == "REJECT") {
+            return (REJECT);
+        } else if (action == "DROP") {
+            return (DROP);
+        } else {
+            throw LoaderError(__FILE__, __LINE__,
+                              string("Unknown action '" + action + "'").
+                                  c_str(),
+                              actionEl);
+        }
+    }
+    catch (const data::TypeError&) {
+        throw LoaderError(__FILE__, __LINE__,
+                          "Invalid element type for action, must be string",
+                          actionEl);
+    }
+}
+
+}
+}
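
For reference, a short sketch of what defaultActionLoader() accepts, based
only on the code above and assuming isc::data::Element::fromJSON() can parse
scalar JSON values:

    #include <acl/loader.h>
    #include <cc/data.h>
    #include <cassert>

    int main() {
        using namespace isc::acl;
        using isc::data::Element;

        // The recognized string values map to the BasicAction enum.
        assert(defaultActionLoader(Element::fromJSON("\"ACCEPT\"")) == ACCEPT);
        assert(defaultActionLoader(Element::fromJSON("\"DROP\"")) == DROP);

        // Anything else (unknown string or a non-string element) results in
        // a LoaderError carrying the offending element.
        try {
            defaultActionLoader(Element::fromJSON("42"));
            assert(false);  // should not get here
        } catch (const LoaderError&) {
            // expected
        }
        return 0;
    }
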
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
new file mode 100644
index 0000000..c3400cb
--- /dev/null
+++ b/src/lib/acl/loader.h
@@ -0,0 +1,448 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOADER_H
+#define ACL_LOADER_H
+
+#include "acl.h"
+#include <cc/data.h>
+#include <boost/function.hpp>
+#include <boost/shared_ptr.hpp>
+#include <map>
+
+namespace isc {
+namespace acl {
+
+class AnyOfSpec;
+class AllOfSpec;
+template<typename Mode, typename Context> class LogicOperator;
+
+/**
+ * \brief Exception for bad ACL specifications.
+ *
+ * This will be thrown by the Loader if the ACL description is malformed
+ * in some way.
+ *
+ * It can also hold the optional JSON element where the error was detected,
+ * so it can be examined.
+ *
+ * Checks may subclass this exception for similar errors if they see fit.
+ */
+class LoaderError : public BadValue {
+private:
+    const data::ConstElementPtr element_;
+public:
+    /**
+     * \brief Constructor.
+     *
+     * Should be used with isc_throw if the fourth argument isn't used.
+     *
+     * \param file The file where the throw happened.
+     * \param line Similar as file, just for the line number.
+     * \param what Human readable description of what happened.
+     * \param element This might be passed to hold the JSON element where
+     *     the error was detected.
+     */
+    LoaderError(const char* file, size_t line, const char* what,
+                data::ConstElementPtr element = data::ConstElementPtr()) :
+        BadValue(file, line, what),
+        element_(element)
+    {}
+    ~LoaderError() throw() {}
+    /**
+     * \brief Get the element.
+     *
+     * This returns the element where the error was detected. Note that it
+     * might be NULL in some situations.
+     */
+    const data::ConstElementPtr& element() const {
+        return (element_);
+    }
+};
+
+/**
+ * \brief Loader of the default actions of ACLs.
+ *
+ * Declared outside the Loader class, as this one does not need to be
+ * templated. This will throw LoaderError if the parameter isn't a string
+ * or if it doesn't contain one of the accepted values.
+ *
+ * \param action The JSON representation of the action. It must be a string
+ *     and contain one of "ACCEPT", "REJECT" or "DROP".
+ * \note We could define different names or add aliases if needed.
+ */
+BasicAction defaultActionLoader(data::ConstElementPtr action);
+
+/**
+ * \brief Loader of ACLs.
+ *
+ * The goal of this class is to convert JSON description of an ACL to object
+ * of the ACL class (including the checks inside it).
+ *
+ * The class can be used to load the checks only. This is supposed to be used
+ * by compound checks to create the subexpressions.
+ *
+ * To allow any kind of checks to exist in the application, creators are
+ * registered for the names of the checks.
+ *
+ * An ACL definition looks like this:
+ * \verbatim
+ * [
+ *   {
+ *      "action": "ACCEPT",
+ *      "match-type": <parameter>
+ *   },
+ *   {
+ *      "action": "REJECT",
+ *      "match-type": <parameter>
+ *      "another-match-type": [<parameter1>, <parameter2>]
+*    },
+*    {
+*       "action": "DROP"
+*    }
+ * ]
+ * \endverbatim
+ *
+ * This is a list of elements. Each element must have an "action"
+ * entry/keyword. That entry specifies which action is returned if this
+ * element matches (the value of the key is passed to the action loader;
+ * see the constructor). It may be any piece of JSON which the action
+ * loader expects.
+ *
+ * The rest of the element consists of matches. The left side is the name of
+ * the match type (for example a match for the source IP address or a match
+ * for the message size). The <parameter> is whatever is needed to describe
+ * the match and depends on the match type; the loader passes it verbatim to
+ * the creator of that match type.
+ *
+ * There may be multiple match types in a single element. In such a case, all
+ * of the matches must match for the element to take action (so, in the second
+ * element, both "match-type" and "another-match-type" must be satisfied).
+ * If there's no match in the element, the action is taken/returned
+ * unconditionally, every time (this makes sense as the last entry, as the ACL
+ * will never get past it).
+ *
+ * The second entry shows another thing - if the value of some match is a list
+ * and the match itself is not expecting a list, it is taken as an "or" - at
+ * least one of the choices in the list must match. So, for the second entry,
+ * both "match-type" and "another-match-type" must be satisfied, but the
+ * latter is satisfied by either parameter1 or parameter2.
+ */
+template<typename Context, typename Action = BasicAction> class Loader {
+public:
+    /**
+     * \brief Constructor.
+     *
+     * \param defaultAction The default action for created ACLs.
+     * \param actionLoader is the loader which will be used to convert actions
+     *     from their JSON representation. The default value is suitable for
+     *     the BasicAction enum. If you did not specify the second
+     *     template argument, you don't need to specify this loader.
+     */
+    Loader(const Action& defaultAction,
+           const boost::function1<Action, data::ConstElementPtr>
+               &actionLoader = &defaultActionLoader) :
+        default_action_(defaultAction),
+        action_loader_(actionLoader)
+    {}
+    /**
+     * \brief Creator of the checks.
+     *
+     * This can be registered within the Loader and will be used to create the
+     * checks. It is expected that multiple creators (for multiple types; one
+     * creator can even handle multiple names) will be created and registered
+     * to support the range of things we could check. This allows customizing
+     * and extending
+     * the loader.
+     */
+    class CheckCreator {
+    public:
+        /**
+         * \brief List of names supported by this loader.
+         *
+         * List of all names for which this loader is able to create the
+         * checks. There can be multiple names, to support both aliases
+         * to the same checks and creators capable of creating multiple
+         * types of checks.
+         */
+        virtual std::vector<std::string> names() const = 0;
+        /**
+         * \brief Creates the check.
+         *
+         * This function does the actual creation. It is passed all the
+         * relevant data and is supposed to return shared pointer to the
+         * check.
+         *
+         * It is expected to throw the LoaderError exception when the
+         * definition is invalid.
+         *
+         * \param name The type name of the check. If the creator creates
+         *     only one type of check, it can safely ignore this parameter.
+         * \param definition The part of the JSON describing the parameters of
+         *     the check. As there's no way for the loader to know what the
+         *     parameters might look like, they are not checked in any way.
+         *     Therefore it's up to the creator (or the check being created)
+         *     to validate the data and throw if it is bad.
+         * \param loader The current loader calling this creator. This can be
+         *     used to load subexpressions in case of a compound check.
+         */
+        virtual boost::shared_ptr<Check<Context> > create(
+            const std::string& name, data::ConstElementPtr definition,
+            const Loader<Context, Action>& loader) = 0;
+        /**
+         * \brief Is list or-abbreviation allowed?
+         *
+         * If this returns true and the parameter (eg. the value we check
+         * against, the one that is passed as the second parameter of create)
+         * is a list, the loader will call the create method with each element
+         * of the list and aggregate all the results in an OR compound check.
+         * If it is false, the parameter is passed verbatim no matter whether
+         * it is a list or not. For example, the IP check will have this as
+         * true (so multiple IP addresses can be passed as options), but the
+         * AND operator will return false and handle the list of
+         * subexpressions itself.
+         *
+         * The rationale behind this is that it is common to specify a list of
+         * something that matches (eg. a list of IP addresses).
+         */
+        virtual bool allowListAbbreviation() const {
+            return (true);
+        }
+    };
+    /**
+     * \brief Register another check creator.
+     *
+     * Adds a creator to the list of known ones. The creator's list of names
+     * must be disjoint with the names already known to the loader, or the
+     * LoaderError exception is thrown. In such a case, the creator is not
+     * registered under any of the names. In case of other exceptions, like
+     * bad_alloc, only weak exception safety is guaranteed.
+     *
+     * \param creator Shared pointer to the creator.
+     * \note We don't support deregistration yet, but it is expected it will
+     *     be needed in future, when we have some kind of plugins. These
+     *     plugins might want to unload, in which case they would need to
+     *     deregister their creators. It is expected they would pass the same
+     *     pointer to such method as they pass here.
+     */
+    void registerCreator(boost::shared_ptr<CheckCreator> creator) {
+        // First check we can insert all the names
+        typedef std::vector<std::string> Strings;
+        const Strings names(creator->names());
+        for (Strings::const_iterator i(names.begin()); i != names.end();
+             ++i) {
+            if (creators_.find(*i) != creators_.end()) {
+                isc_throw(LoaderError, "The loader already contains creator "
+                          "named " << *i);
+            }
+        }
+        // Now insert them
+        for (Strings::const_iterator i(names.begin()); i != names.end();
+             ++i) {
+            creators_[*i] = creator;
+        }
+    }
+    /**
+     * \brief Load a check.
+     *
+     * This parses a check dict (block, the one element of ACL) and calls a
+     * creator (or creators, if more than one check is found inside) for it. It
+     * ignores the "action" key, as it is a reserved keyword used to specify
+     * actions inside the ACL.
+     *
+     * This may throw LoaderError if it is not a dict or if one of the type
+     * names is not known (there's no creator registered for it). The
+     * exceptions from creators aren't caught.
+     *
+     * \param description The JSON description of the check.
+     */
+    boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
+                                                 description) const
+    {
+        // Get the description as a map
+        typedef std::map<std::string, data::ConstElementPtr> Map;
+        Map map;
+        try {
+            map = description->mapValue();
+        }
+        catch (const data::TypeError&) {
+            isc_throw_1(LoaderError, "Check description is not a map",
+                        description);
+        }
+        // Call the internal part with extracted map
+        return (loadCheck(description, map));
+    }
+    /**
+     * \brief Load an ACL.
+     *
+     * This parses an ACL list, creates the checks and actions of each element
+     * and returns it. It may throw LoaderError if it isn't a list or the
+     * "action" key is missing in some element. Also, no exceptions from
+     * loadCheck (therefore from whatever creator is used) and from the
+     * actionLoader passed to constructor are not caught.
+     *
+     * \param description The JSON list of ACL.
+     */
+    boost::shared_ptr<ACL<Context, Action> > load(const data::ConstElementPtr&
+                                                  description) const
+    {
+        // We first check it's a list, so we can use the list reference
+        // (the list may be huge)
+        if (description->getType() != data::Element::list) {
+            isc_throw_1(LoaderError, "ACL not a list", description);
+        }
+        // First create an empty ACL
+        const List &list(description->listValue());
+        boost::shared_ptr<ACL<Context, Action> > result(
+            new ACL<Context, Action>(default_action_));
+        // Run through the list of elements
+        for (List::const_iterator i(list.begin()); i != list.end(); ++i) {
+            Map map;
+            try {
+                map = (*i)->mapValue();
+            }
+            catch (const data::TypeError&) {
+                isc_throw_1(LoaderError, "ACL element not a map", *i);
+            }
+            // Create an action for the element
+            const Map::const_iterator action(map.find("action"));
+            if (action == map.end()) {
+                isc_throw_1(LoaderError, "No action in ACL element", *i);
+            }
+            const Action acValue(action_loader_(action->second));
+            // Now create the check if there's one
+            if (map.size() >= 2) { // One is the action, another one the check
+                result->append(loadCheck(*i, map), acValue);
+            } else {
+                // In case there's no check, this matches every time. We
+                // simulate it by our own private "True" check.
+                result->append(boost::shared_ptr<Check<Context> >(new True()),
+                               acValue);
+            }
+        }
+        return (result);
+    }
+private:
+    // Some type aliases to save typing
+    typedef std::map<std::string, boost::shared_ptr<CheckCreator> > Creators;
+    typedef std::map<std::string, data::ConstElementPtr> Map;
+    typedef std::vector<data::ConstElementPtr> List;
+    // Private members
+    Creators creators_;
+    const Action default_action_;
+    const boost::function1<Action, data::ConstElementPtr> action_loader_;
+    /**
+     * \brief Internal version of loadCheck.
+     *
+     * This is the internal part, shared between load and loadCheck.
+     * \param description The bit of JSON (used in exceptions).
+     * \param map The extracted map describing the check. Note that this
+     *     function modifies the map.
+     */
+    boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
+                                                 description, Map& map) const
+    {
+        // Remove the action keyword
+        map.erase("action");
+        // Now, do we have any definition? Or is it an abbreviation?
+        switch (map.size()) {
+            case 0:
+                isc_throw_1(LoaderError, "Check description is empty",
+                            description);
+            case 1: {
+                // Get the first and only item
+                const Map::const_iterator checkDesc(map.begin());
+                const std::string& name(checkDesc->first);
+                const typename Creators::const_iterator
+                    creatorIt(creators_.find(name));
+                if (creatorIt == creators_.end()) {
+                    isc_throw_1(LoaderError, "No creator for ACL check " <<
+                                name, description);
+                }
+                if (creatorIt->second->allowListAbbreviation() &&
+                    checkDesc->second->getType() == data::Element::list) {
+                    // Or-abbreviated form - create an OR and put everything
+                    // inside.
+                    const std::vector<data::ConstElementPtr>&
+                        params(checkDesc->second->listValue());
+                    boost::shared_ptr<LogicOperator<AnyOfSpec, Context> >
+                        oper(new LogicOperator<AnyOfSpec, Context>);
+                    for (std::vector<data::ConstElementPtr>::const_iterator
+                             i(params.begin());
+                         i != params.end(); ++i) {
+                        oper->addSubexpression(
+                            creatorIt->second->create(name, *i, *this));
+                    }
+                    return (oper);
+                }
+                // Create the check and return it
+                return (creatorIt->second->create(name, checkDesc->second,
+                                                  *this));
+            }
+            default: {
+                // This is the AND-abbreviated form. We need to create an
+                // AND (or "ALL") operator, loop trough the whole map and
+                // fill it in. We do a small trick - we create bunch of
+                // single-item maps, call this loader recursively (therefore
+                // it will get into the "case 1" branch, where there is
+                // the actual loading) and use the results to fill the map.
+                //
+                // We keep the description the same, there's nothing we could
+                // take out (we could create a new one, but that would be
+                // confusing, as it is used for error messages only).
+                boost::shared_ptr<LogicOperator<AllOfSpec, Context> >
+                    oper(new LogicOperator<AllOfSpec, Context>);
+                for (Map::const_iterator i(map.begin()); i != map.end(); ++i) {
+                    Map singleSubexpr;
+                    singleSubexpr.insert(*i);
+                    oper->addSubexpression(loadCheck(description,
+                                                     singleSubexpr));
+                }
+                return (oper);
+            }
+        }
+    }
+    /**
+     * \brief Check that always matches.
+     *
+     * This one is used internally for ACL elements without a condition. We
+     * may want to make this publicly accessible at some point, but for now
+     * there's no need.
+     */
+    class True : public Check<Context> {
+    public:
+        virtual bool matches(const Context&) const { return (true); };
+        virtual unsigned cost() const { return (1); }
+        // We don't write "true" here, as this one was created using empty
+        // input
+        virtual std::string toText() const { return ""; }
+    };
+};
+
+}
+}
+
+/*
+ * This include at the end of the file is unusual. But we need to include it,
+ * as we use template classes from there. However, they need to be present
+ * only at instantiation of our class, which will happen below this header.
+ *
+ * The problem is that the header uses us as well, so there's a circular
+ * dependency. If we included it at the beginning and someone included us
+ * first, the logic_check header wouldn't have our definitions. This way, no
+ * matter in which order they are included, the definitions from this header
+ * will be above the ones from logic_check.
+ */
+#include "logic_check.h"
+
+#endif
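
Putting the pieces above together, here is a hedged end-to-end sketch of
registering a creator and loading an ACL from the JSON form described in the
class documentation. The TrivialContext, AlwaysCheck and AlwaysCreator names
are invented for the example; evaluation of the resulting ACL is omitted, as
the ACL interface itself lives in acl.h.

    #include <acl/loader.h>
    #include <cc/data.h>
    #include <boost/shared_ptr.hpp>
    #include <string>
    #include <vector>

    // Invented, trivial context type for the example.
    struct TrivialContext {};

    // A check that matches everything, plus its creator for the name "always".
    class AlwaysCheck : public isc::acl::Check<TrivialContext> {
    public:
        virtual bool matches(const TrivialContext&) const { return (true); }
    };

    class AlwaysCreator : public isc::acl::Loader<TrivialContext>::CheckCreator {
    public:
        virtual std::vector<std::string> names() const {
            return (std::vector<std::string>(1, "always"));
        }
        virtual boost::shared_ptr<isc::acl::Check<TrivialContext> > create(
            const std::string&, isc::data::ConstElementPtr,
            const isc::acl::Loader<TrivialContext>&)
        {
            return (boost::shared_ptr<isc::acl::Check<TrivialContext> >(
                new AlwaysCheck()));
        }
    };

    int main() {
        isc::acl::Loader<TrivialContext> loader(isc::acl::REJECT);
        loader.registerCreator(
            boost::shared_ptr<isc::acl::Loader<TrivialContext>::CheckCreator>(
                new AlwaysCreator()));

        // One element with a check, one unconditional DROP at the end.
        boost::shared_ptr<isc::acl::ACL<TrivialContext> > acl = loader.load(
            isc::data::Element::fromJSON(
                "[{\"action\": \"ACCEPT\", \"always\": \"anything\"},"
                " {\"action\": \"DROP\"}]"));
        return (acl ? 0 : 1);
    }
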
diff --git a/src/lib/acl/logic_check.h b/src/lib/acl/logic_check.h
new file mode 100644
index 0000000..6e1c567
--- /dev/null
+++ b/src/lib/acl/logic_check.h
@@ -0,0 +1,206 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOGIC_CHECK_H
+#define ACL_LOGIC_CHECK_H
+
+#include "check.h"
+#include "loader.h"
+
+namespace isc {
+namespace acl {
+
+/// \brief Constants for the AnyOf implementation
+class AnyOfSpec {
+public:
+    static bool start() { return (false); }
+    static bool terminate(const bool another) {
+        return (another);
+    }
+};
+
+/// \brief Constants for the AllOf implementation
+class AllOfSpec {
+public:
+    static bool start() { return (true); }
+    static bool terminate(const bool another) {
+        return (!another);
+    }
+};
+
+/**
+ * \brief Logic operators
+ *
+ * This class implements the AllOf and AnyOf compound checks. As their
+ * behaviour is almost the same, the same template class is used. Which
+ * one it is depends on the Mode template parameter. The Mode should be
+ * one of AnyOfSpec or AllOfSpec, which provide some commands for the
+ * internal implementation. It would be nice to provide typedefs for
+ * them, but it is impossible to do so, as we have the Context template
+ * parameter as well and C++ doesn't like templated typedefs.
+ *
+ * The object holds several subexpressions and returns true if all
+ * of the subexpressions return true (in case of AllOfSpec Mode) or
+ * at least one of them returns true (in case of AnyOfSpec Mode). If
+ * some subexpression guarantees the result (eg. one returns false
+ * in case of AllOfSpec), the rest is not tried for performance
+ * reasons.
+ */
+template<typename Mode, typename Context>
+class LogicOperator : public CompoundCheck<Context> {
+public:
+    /**
+     * \brief Add another subexpression.
+     *
+     * This adds another subexpression to the list of checked expressions.
+     * This is usually done shortly after the creation, before using the
+     * check for matches.
+     *
+     * Currently there's no way to place the expression into an arbitrary
+     * position or to remove it. It might turn out that this is needed in the
+     * future for optimisation, or it might even turn out we need shared
+     * pointers for it.
+     *
+     * \param expr The new expression to put inside.
+     */
+    void addSubexpression(const boost::shared_ptr<Check<Context> >& expr) {
+        checks_.push_back(expr);
+    }
+    /**
+     * \brief The current list of subexpressions.
+     */
+    virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+        typename CompoundCheck<Context>::Checks result;
+        for (typename Checks::const_iterator i(checks_.begin());
+             i != checks_.end(); ++i) {
+            result.push_back(i->get());
+        }
+        return (result);
+    }
+    /**
+     * \brief The match of the check.
+     *
+     * Runs the subexpressions, one by one, and then decides based on that
+     * what to return.
+     */
+    virtual bool matches(const Context& context) const {
+        /*
+         * This might look slightly complicated. However, it is just a
+         * generalized version of multi-and or multi-or. The usual
+         * implementation of multi-and starts with true and, if a false
+         * result is found, the overall result becomes false forever and
+         * false is returned. It is exactly the other way around with or.
+         *
+         * So, if we ever find a result that differs from the starting value
+         * (false in case of and, true in case of or), we can just stop and
+         * return that result right away. If no such expression is met, we
+         * get to the end and return the default.
+         */
+        for (typename Checks::const_iterator i(checks_.begin());
+             i != checks_.end(); ++i) {
+            if (Mode::terminate((*i)->matches(context))) {
+                return (!Mode::start());
+            }
+        }
+        return (Mode::start());
+    }
+private:
+    /// \brief List of subexpressions
+    typedef typename std::vector<boost::shared_ptr<Check<Context> > > Checks;
+    Checks checks_;
+};
+
+/**
+ * \brief Creator for the LogicOperator compound check.
+ *
+ * This class can load the ANY and ALL operators from JSON. They expect
+ * a list of subexpressions as a parameter, eg. like this:
+ *
+ * \verbatim
+ * {"ANY": [
+ *    {"ip": "1.2.3.4"},
+ *    {"ip": "5.6.7.8"}
+ * ]}
+ * \endverbatim
+ *
+ * It uses the loader to load the subexpressions, therefore whatever is
+ * supported there is supported here as well.
+ *
+ * The Mode template parameter has the same meaning as with LogicOperator,
+ * it is used to know which operators to create.
+ */
+template<typename Mode, typename Context, typename Action = BasicAction>
+class LogicCreator : public Loader<Context, Action>::CheckCreator {
+public:
+    /**
+     * \brief Constructor.
+     *
+     * \param name The name for which the creator will work. In practice,
+     *     it will usually be ANY or ALL (depending on the mode), but
+     *     anything else can be used as well.
+     */
+    LogicCreator(const std::string& name) :
+        name_(name)
+    {}
+    /// \brief Returns vector containing the name.
+    virtual std::vector<std::string> names() const {
+        std::vector<std::string> result;
+        result.push_back(name_);
+        return (result);
+    }
+    /**
+     * \brief Converts a JSON description into the logic operator.
+     *
+     * This is the place where the actual loading happens. It creates
+     * the logic operator and calls the loader on each of the list
+     * elements, placing the result into the logic operator.
+     *
+     * The first parameter is ignored and is there only to match the interface.
+     *
+     * \param definition The JSON definition of the subexpressions. This must
+     *     be a list (if it isn't, the LoaderError is thrown) and the elements
+     *     must be loadable by the loader (the exceptions from it are not
+     *     caught).
+     * \param loader The loader to use for loading of subexpressions.
+     */
+    virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+                                                      data::ConstElementPtr
+                                                      definition,
+                                                      const Loader<Context,
+                                                      Action>& loader)
+    {
+        std::vector<data::ConstElementPtr> subexprs;
+        try {
+            subexprs = definition->listValue();
+        }
+        catch (const data::TypeError&) {
+            isc_throw_1(LoaderError, "Logic operator takes list", definition);
+        }
+        boost::shared_ptr<LogicOperator<Mode, Context> >
+            result(new LogicOperator<Mode, Context>);
+        for (std::vector<data::ConstElementPtr>::const_iterator
+                 i(subexprs.begin());
+             i != subexprs.end(); ++i) {
+            result->addSubexpression(loader.loadCheck(*i));
+        }
+        return (result);
+    }
+    virtual bool allowListAbbreviation() const { return (false); }
+private:
+    const std::string name_;
+};
+
+}
+}
+
+#endif
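
Continuing the previous sketch, the ANY and ALL operators become available to
a loader simply by registering LogicCreator instances under the chosen names.
TrivialContext is the same invented context type as before, redeclared so the
snippet stands alone.

    #include <acl/logic_check.h>
    #include <boost/shared_ptr.hpp>

    struct TrivialContext {};   // invented context type for the example

    // Make the "ANY" and "ALL" operators available to a loader.  This mirrors
    // what an application would do right after constructing its Loader.
    void registerLogicOperators(isc::acl::Loader<TrivialContext>& loader) {
        using namespace isc::acl;
        loader.registerCreator(
            boost::shared_ptr<Loader<TrivialContext>::CheckCreator>(
                new LogicCreator<AnyOfSpec, TrivialContext>("ANY")));
        loader.registerCreator(
            boost::shared_ptr<Loader<TrivialContext>::CheckCreator>(
                new LogicCreator<AllOfSpec, TrivialContext>("ALL")));
    }
    // Once registered, an element such as
    //   {"action": "ACCEPT", "ANY": [{"always": "a"}, {"always": "b"}]}
    // loads into a LogicOperator<AnyOfSpec, ...> whose subexpressions are
    // produced by loader.loadCheck() on each list item.
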
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index a6e90f7..f43e057 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -5,12 +5,25 @@ TESTS =
 if HAVE_GTEST
 TESTS += run_unittests
 run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += check_test.cc acl_test.cc
+run_unittests_SOURCES += acl_test.cc
+run_unittests_SOURCES += check_test.cc
+run_unittests_SOURCES += dns_test.cc
+run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += loader_test.cc
+run_unittests_SOURCES += logcheck.h
+run_unittests_SOURCES += creators.h
+run_unittests_SOURCES += logic_check_test.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 
 run_unittests_LDADD = $(GTEST_LDADD)
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
 endif
 
 noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/acl/tests/acl_test.cc b/src/lib/acl/tests/acl_test.cc
index 36baff6..5829fe7 100644
--- a/src/lib/acl/tests/acl_test.cc
+++ b/src/lib/acl/tests/acl_test.cc
@@ -12,75 +12,11 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include <gtest/gtest.h>
-#include <acl/acl.h>
-#include <cassert>
-
-using namespace isc::acl;
-using boost::shared_ptr;
+#include "logcheck.h"
 
 namespace {
 
-// This is arbitrary guess of size for the log. If it's too small for your
-// test, just make it bigger.
-const size_t LOG_SIZE = 10;
-
-// This will remember which checks did run already.
-struct Log {
-    // The actual log cells, if i-th check did run
-    mutable bool run[LOG_SIZE];
-    Log() {
-        // Nothing run yet
-        for (size_t i(0); i < LOG_SIZE; ++i) {
-            run[i] = false;
-        }
-    }
-    // Checks that the first amount of checks did run and the rest didn't.
-    void checkFirst(size_t amount) const {
-        ASSERT_LE(amount, LOG_SIZE) << "Wrong test: amount bigger than size "
-            "of log";
-        {
-            SCOPED_TRACE("Checking that the first amount of checks did run");
-            for (size_t i(0); i < amount; ++i) {
-                EXPECT_TRUE(run[i]) << "Check #" << i << " did not run.";
-            }
-        }
-
-        {
-            SCOPED_TRACE("Checking that the rest did not run");
-            for (size_t i(amount); i < LOG_SIZE; ++i) {
-                EXPECT_FALSE(run[i]) << "Check #" << i << "did run.";
-            }
-        }
-    }
-};
-
-// This returns true or false every time, no matter what is passed to it.
-// But it logs that it did run.
-class ConstCheck : public Check<Log> {
-public:
-    ConstCheck(bool accepts, size_t log_num) :
-        log_num_(log_num),
-        accepts_(accepts)
-    {
-        assert(log_num < LOG_SIZE); // If this fails, the LOG_SIZE is too small
-    }
-    /*
-     * This use of mutable log context is abuse for testing purposes.
-     * It is expected that the context will not be modified in the real
-     * applications of ACLs, but we want to know which checks were called
-     * and this is an easy way.
-     */
-    virtual bool matches(const Log& log) const {
-        log.run[log_num_] = true;
-        return (accepts_);
-    }
-private:
-    size_t log_num_;
-    bool accepts_;
-};
-
-// Test version of the ACL class. It adds few methods to examine the protected
+// Test version of the ACL class. It adds a few methods to examine the protected
 // data, but does not change the implementation.
 class TestACL : public ACL<Log> {
 public:
diff --git a/src/lib/acl/tests/creators.h b/src/lib/acl/tests/creators.h
new file mode 100644
index 0000000..85f3444
--- /dev/null
+++ b/src/lib/acl/tests/creators.h
@@ -0,0 +1,154 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is not a public header, but some code shared between tests
+// This one contains various creators to test the loader and other creators
+
+#ifndef CREATORS_H
+#define CREATORS_H
+
+#include "logcheck.h"
+#include <acl/loader.h>
+#include <string>
+
+using isc::data::ConstElementPtr;
+using namespace std;
+using namespace boost;
+
+namespace {
+
+// Just for convenience, create JSON objects from JSON string
+ConstElementPtr el(const string& JSON) {
+    return (isc::data::Element::fromJSON(JSON));
+}
+
+// A check that doesn't check anything but remembers its own name
+// and data
+class NamedCheck : public Check<Log> {
+public:
+    NamedCheck(const string& name, ConstElementPtr data) :
+        name_(name),
+        data_(data)
+    {}
+    virtual bool matches(const Log&) const { return (true); }
+    const string name_;
+    const ConstElementPtr data_;
+};
+
+// The creator of NamedCheck
+class NamedCreator : public Loader<Log>::CheckCreator {
+public:
+    NamedCreator(const string& name, bool abbreviatedList = true) :
+        abbreviated_list_(abbreviatedList)
+    {
+        names_.push_back(name);
+    }
+    NamedCreator(const vector<string>& names) :
+        names_(names),
+        abbreviated_list_(true)
+    {}
+    vector<string> names() const {
+        return (names_);
+    }
+    shared_ptr<Check<Log> > create(const string& name, ConstElementPtr data,
+                                   const Loader<Log>&)
+    {
+        bool found(false);
+        for (vector<string>::const_iterator i(names_.begin());
+             i != names_.end(); ++i) {
+            if (*i == name) {
+                found = true;
+                break;
+            }
+        }
+        EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
+            "doesn't handle it.";
+        return (shared_ptr<Check<Log> >(new NamedCheck(name, data)));
+    }
+    bool allowListAbbreviation() const {
+        return (abbreviated_list_);
+    }
+private:
+    vector<string> names_;
+    const bool abbreviated_list_;
+};
+
+// To be thrown in tests internally
+class TestCreatorError {};
+
+// This will throw every time it should create something
+class ThrowCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("throw");
+        return (result);
+    }
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
+                                   const Loader<Log>&)
+    {
+        throw TestCreatorError();
+    }
+};
+
+// This throws whenever the match is called on it
+class ThrowCheck : public Check<Log> {
+public:
+    virtual bool matches(const Log&) const {
+        throw TestCreatorError();
+    }
+};
+
+// And creator for it
+class ThrowCheckCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("throwcheck");
+        return (result);
+    }
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
+                                   const Loader<Log>&)
+    {
+        return (shared_ptr<Check<Log> >(new ThrowCheck()));
+    }
+};
+
+class LogCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("logcheck");
+        return (result);
+    }
+    /*
+     * For simplicity, we just take two values as a list: the first is the
+     * logging cell used, the second is the result of the check. No error checking
+     * is done; if there's a bug in the test, it will throw TypeError for us.
+     */
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr definition,
+                                   const Loader<Log>&)
+    {
+        vector<ConstElementPtr> list(definition->listValue());
+        int logpos(list[0]->intValue());
+        bool accept(list[1]->boolValue());
+        return (shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
+    }
+    // We take a list, so don't interpret it for us
+    virtual bool allowListAbbreviation() const { return (false); }
+};
+
+}
+
+#endif
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
new file mode 100644
index 0000000..e5e0f3a
--- /dev/null
+++ b/src/lib/acl/tests/dns_test.cc
@@ -0,0 +1,35 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <acl/dns.h>
+#include <gtest/gtest.h>
+
+using namespace isc::acl::dns;
+
+namespace {
+
+// Tests that getLoader actually returns something, returns the same instance
+// every time, and that the returned value can be used at all. It is not much
+// of a test, but then getLoader is not much of a function.
+TEST(DNSACL, getLoader) {
+    Loader* l(&getLoader());
+    ASSERT_TRUE(l != NULL);
+    EXPECT_EQ(l, &getLoader());
+    EXPECT_NO_THROW(l->load(isc::data::Element::fromJSON(
+        "[{\"action\": \"DROP\"}]")));
+    // TODO Test that the things we should register by default, like IP based
+    // check, are loaded.
+}
+
+}
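
For orientation, the interface this test exercises boils down to the following
usage sketch. It is limited to what the test itself shows; nothing beyond
getLoader() and load() taking a JSON element is assumed, and the context type
needed to actually run the loaded ACL is not covered here.

    #include <acl/dns.h>

    using namespace isc::acl::dns;

    void dnsAclSketch() {
        // getLoader() hands out a single shared loader; every call returns
        // the same instance.
        Loader& loader = getLoader();
        // A one-entry ACL whose entry has no check: it matches
        // unconditionally and drops the query.
        loader.load(isc::data::Element::fromJSON(
            "[{\"action\": \"DROP\"}]"));
    }
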
diff --git a/src/lib/acl/tests/ip_check_unittest.cc b/src/lib/acl/tests/ip_check_unittest.cc
new file mode 100644
index 0000000..3fcb05b
--- /dev/null
+++ b/src/lib/acl/tests/ip_check_unittest.cc
@@ -0,0 +1,588 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+#include <acl/ip_check.h>
+
+using namespace isc::acl;
+using namespace isc::acl::internal;
+using namespace std;
+
+namespace {
+const size_t IPV4_SIZE = 4;
+const size_t IPV6_SIZE = 16;
+
+// Simple struct holding either an IPV4 or IPV6 address.  This is the "Context"
+// used for the tests.
+//
+// The structure is also used for converting an IPV4 address to a four-byte
+// array.
+struct GeneralAddress {
+    int             family;     // Family of the address
+    vector<uint8_t> addr;       // Address bytes.  Size indicates what it holds
+
+    // Convert uint32_t address in host-byte order to a uint8_t vector in
+    // network-byte order.
+    vector<uint8_t> convertUint32(uint32_t address) {
+        BOOST_STATIC_ASSERT(sizeof(uint32_t) == IPV4_SIZE);
+
+        vector<uint8_t> result(IPV4_SIZE);
+
+        // The result is in network-byte order, so copy to the array with
+        // the MS byte of the address at the lowest index.
+        result[3] = address & 0xff;
+        result[2] = (address >> 8) & 0xff;
+        result[1] = (address >> 16) & 0xff;
+        result[0] = (address >> 24) & 0xff;
+
+        return (result);
+    }
+
+    // Convenience constructor for V4 address.  As it is not marked as explicit,
+    // it allows the automatic promotion of a uint32_t to a GeneralAddress data
+    // type in calls to matches().
+    GeneralAddress(uint32_t address) : family(AF_INET), addr()
+    {
+        addr = convertUint32(address);
+    }
+
+    // Convenience constructor for V6 address.  As it is not marked as explicit,
+    // it allows the automatic promotion of a vector<uint8_t> to a
+    // GeneralAddress data type in calls to matches().
+    GeneralAddress(const vector<uint8_t>& address) : family(AF_INET6),
+                                                     addr(address)
+    {
+        if (address.size() != IPV6_SIZE) {
+            isc_throw(isc::InvalidParameter, "vector passed to GeneralAddress "
+                      "constructor is " << address.size() << " bytes long - it "
+                      "should be " << IPV6_SIZE << " bytes instead");
+        }
+    }
+
+    // A couple of convenience methods for checking equality with different
+    // representations of an address.
+
+    // Check that the IPV4 address is the same as that given.
+    bool equals(uint32_t address) {
+        if (family == AF_INET) {
+            const vector<uint8_t> byte_address = convertUint32(address);
+            return (equal(byte_address.begin(), byte_address.end(),
+                           addr.begin()));
+        }
+        return (false);
+    }
+
+    // Check that the array is equal to that given.
+    bool equals(const vector<uint8_t>& byte_address) {
+        if (addr.size() == byte_address.size()) {
+            return (equal(byte_address.begin(), byte_address.end(),
+                           addr.begin()));
+        }
+        return (false);
+    }
+};
+} // Unnamed namespace
+
+// Provide a specialisation of the IPCheck::matches() method for the
+// GeneralAddress class.
+
+namespace isc  {
+namespace acl {
+template <>
+bool IPCheck<GeneralAddress>::matches(const GeneralAddress& address) const {
+    return (compare(&address.addr[0], address.family));
+}
+} // namespace acl
+} // namespace isc
+
+namespace {
+/// *** Free Function Tests ***
+
+// Test the createMask() function.
+TEST(IPFunctionCheck, CreateMask) {
+
+    // Invalid arguments should throw.
+    EXPECT_THROW(createMask(9), isc::OutOfRange);
+
+    // Check on all possible 8-bit values.
+    uint16_t expected = 0xff00;
+    for (size_t i = 0; i <= 8; ++i, expected >>= 1) {
+        EXPECT_EQ(static_cast<uint8_t>(expected & 0xff), createMask(i));
+    }
+}
+
+// Test the splitIPAddress() function.
+TEST(IPFunctionCheck, SplitIPAddress) {
+    pair<string, uint32_t> result;
+
+    result = splitIPAddress("192.0.2.1");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(-1, result.second);
+
+    result = splitIPAddress("192.0.2.1/24");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(24, result.second);
+
+    result = splitIPAddress("2001:db8::/128");
+    EXPECT_EQ(string("2001:db8::"), result.first);
+    EXPECT_EQ(128, result.second);
+
+    result = splitIPAddress("192.0.2.1/0");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(0, result.second);
+
+    EXPECT_THROW(splitIPAddress("192.0.2.43/27 "), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43/-1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43//1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43/1/"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("/192.0.2.43/1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("2001:db8::/xxxx"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("2001:db8::/32/s"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("1/"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("/1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress(" 1/ "), isc::InvalidParameter);
+}
+
+// *** IPv4 Tests ***
+
+TEST(IPCheck, V4StringConstructor) {
+
+    // Constructor with no prefix length given (32 is assumed).
+    IPCheck<GeneralAddress> acl1("192.0.2.255");
+    EXPECT_EQ(32, acl1.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl1.getFamily());
+
+    vector<uint8_t> stored1 = acl1.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored1.size());
+    GeneralAddress expected1(0xc00002ff);
+    EXPECT_TRUE(expected1.equals(stored1));
+
+    // Constructor with valid mask given
+    IPCheck<GeneralAddress> acl2("192.0.2.0/24");
+    EXPECT_EQ(24, acl2.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl2.getFamily());
+
+    vector<uint8_t> stored2 = acl2.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored2.size());
+    GeneralAddress expected2(0xc0000200);
+    EXPECT_TRUE(expected2.equals(stored2));
+
+    // More valid masks
+    IPCheck<GeneralAddress> acl3("192.0.2.1/0");
+    EXPECT_EQ(0, acl3.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl3.getFamily());
+
+    vector<uint8_t> stored3 = acl3.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored3.size());
+    GeneralAddress expected3(0xc0000201);
+    EXPECT_TRUE(expected3.equals(stored3));
+
+    IPCheck<GeneralAddress> acl4("192.0.2.2/32");
+    EXPECT_EQ(32, acl4.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl4.getFamily());
+
+    vector<uint8_t> stored4 = acl4.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored4.size());
+    GeneralAddress expected4(0xc0000202);
+    EXPECT_TRUE(expected4.equals(stored4));
+
+    // Any match
+    IPCheck<GeneralAddress> acl5("any4");
+    EXPECT_EQ(0, acl5.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl5.getFamily());
+
+    vector<uint8_t> stored5 = acl5.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored5.size());
+    GeneralAddress expected5(0);
+    EXPECT_TRUE(expected5.equals(stored5));
+
+    // Invalid prefix lengths
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/33"), isc::OutOfRange);
+
+    // ... and invalid strings
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/-1"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/24/3"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/ww"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("aa.255.255.0/ww"),
+                 isc::InvalidParameter);
+}
+
+TEST(IPCheck, V4CopyConstructor) {
+    IPCheck<GeneralAddress> acl1("192.0.2.1/24");
+    IPCheck<GeneralAddress> acl2(acl1);
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+    EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+    vector<uint8_t> net1 = acl1.getMask();
+    vector<uint8_t> net2 = acl2.getMask();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+    net1 = acl1.getAddress();
+    net2 = acl2.getAddress();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+TEST(IPCheck, V4AssignmentOperator) {
+    IPCheck<GeneralAddress> acl1("192.0.2.0/24");
+    IPCheck<GeneralAddress> acl2("192.0.2.128/25");
+    acl2 = acl1;
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+    EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+    vector<uint8_t> net1 = acl1.getMask();
+    vector<uint8_t> net2 = acl2.getMask();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+    net1 = acl1.getAddress();
+    net2 = acl2.getAddress();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+// Check that the comparison works - note that "matches" just calls the
+// internal compare() code. (Also note that the argument to matches() will be
+// automatically converted to the GeneralAddress data type used for the tests
+// because of its constructor taking a uint32_t argument.)
+
+TEST(IPCheck, V4Compare) {
+    // Exact address - match if given address matches stored address.
+    IPCheck<GeneralAddress> acl1("192.0.2.255/32");
+    EXPECT_TRUE(acl1.matches(0xc00002ff));
+    EXPECT_FALSE(acl1.matches(0xc00002fe));
+    EXPECT_FALSE(acl1.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl2("192.0.2.255/27");
+    EXPECT_TRUE(acl2.matches(0xc00002ff));
+    EXPECT_TRUE(acl2.matches(0xc00002fe));
+    EXPECT_TRUE(acl2.matches(0xc00002ee));
+    EXPECT_FALSE(acl2.matches(0xc00002de));
+    EXPECT_FALSE(acl2.matches(0xd00002fe));
+    EXPECT_FALSE(acl2.matches(0x13457f13));
+
+    // Match if "any4" is specified
+    IPCheck<GeneralAddress> acl3("any4");
+    EXPECT_TRUE(acl3.matches(0xc00002ff));
+    EXPECT_TRUE(acl3.matches(0xc00002fe));
+    EXPECT_TRUE(acl3.matches(0xc00002ee));
+    EXPECT_TRUE(acl3.matches(0xc00002de));
+    EXPECT_TRUE(acl3.matches(0xd00002fe));
+    EXPECT_TRUE(acl3.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl4("0.0.0.0/0");
+    EXPECT_TRUE(acl4.matches(0xc00002ff));
+    EXPECT_TRUE(acl4.matches(0xc00002fe));
+    EXPECT_TRUE(acl4.matches(0xc00002ee));
+    EXPECT_TRUE(acl4.matches(0xc00002de));
+    EXPECT_TRUE(acl4.matches(0xd00002fe));
+    EXPECT_TRUE(acl4.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl5("192.0.2.255/0");
+    EXPECT_TRUE(acl5.matches(0xc00002ff));
+    EXPECT_TRUE(acl5.matches(0xc00002fe));
+    EXPECT_TRUE(acl5.matches(0xc00002ee));
+    EXPECT_TRUE(acl5.matches(0xc00002de));
+    EXPECT_TRUE(acl5.matches(0xd00002fe));
+    EXPECT_TRUE(acl5.matches(0x13457f13));
+}
+
+// *** IPV6 Tests ***
+
+// Some constants used in the tests
+
+const char* V6ADDR_1_STRING = "2001:0db8:1122:3344:5566:7788:99aa:bbcc";
+const uint8_t V6ADDR_1[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x11, 0x22, 0x33, 0x44,
+    0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc
+};
+
+const char* V6ADDR_2_STRING = "2001:0db8::dead:beef";
+const uint8_t V6ADDR_2[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 48 bits
+const uint8_t V6ADDR_2_48[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0xff, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 49 bits
+const uint8_t V6ADDR_2_49[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x7f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 50 bits
+const uint8_t V6ADDR_2_50[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x3f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 51 bits
+const uint8_t V6ADDR_2_51[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x1f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 52 bits
+const uint8_t V6ADDR_2_52[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x0f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 127 bits
+const uint8_t V6ADDR_2_127[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xee
+};
+
+const uint8_t V6ADDR_3[] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+const uint8_t V6ADDR_4[] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+TEST(IPCheck, V6StringConstructor) {
+    IPCheck<GeneralAddress> acl1(V6ADDR_1_STRING);
+    vector<uint8_t> address = acl1.getAddress();
+
+    EXPECT_EQ(128, acl1.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl1.getFamily());
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_1));
+
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/51"));
+    address = acl2.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(51, acl2.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl2.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+    IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/127"));
+    address = acl3.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(127, acl3.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl3.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+    IPCheck<GeneralAddress> acl4("::1");
+    address = acl4.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(128, acl4.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl4.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_3));
+
+    // Any match.  In these cases, the address should be all zeroes.
+    IPCheck<GeneralAddress> acl5("any6");
+    address = acl5.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(0, acl5.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl5.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+    IPCheck<GeneralAddress> acl6("::/0");
+    address = acl6.getAddress();
+    EXPECT_EQ(0, acl6.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl6.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+    // Some invalid strings
+    EXPECT_THROW(IPCheck<GeneralAddress>("::1/129"), isc::OutOfRange);
+    EXPECT_THROW(IPCheck<GeneralAddress>("::1/24/3"), isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>(":::1/24"), isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("2001:0db8::abcd/ww"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("2xx1:0db8::abcd/32"),
+                 isc::InvalidParameter);
+}
+
+TEST(IPCheck, V6CopyConstructor) {
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+    IPCheck<GeneralAddress> acl2(acl1);
+
+    vector<uint8_t> acl1_address = acl1.getAddress();
+    vector<uint8_t> acl2_address = acl2.getAddress();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+    EXPECT_EQ(acl1_address.size(), acl2_address.size());
+    EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+                acl2_address.begin()));
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+    vector<uint8_t> acl1_mask = acl1.getMask();
+    vector<uint8_t> acl2_mask = acl2.getMask();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+    EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+    EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+                acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6AssignmentOperator) {
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_1_STRING) + string("/48"));
+
+    acl2 = acl1;
+
+    vector<uint8_t> acl1_address = acl1.getAddress();
+    vector<uint8_t> acl2_address = acl2.getAddress();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+    EXPECT_EQ(acl1_address.size(), acl2_address.size());
+    EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+                acl2_address.begin()));
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+    vector<uint8_t> acl1_mask = acl1.getMask();
+    vector<uint8_t> acl2_mask = acl2.getMask();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+    EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+    EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+                acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6Compare) {
+    // Set up some data.
+    vector<uint8_t> v6addr_2(V6ADDR_2, V6ADDR_2 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_48(V6ADDR_2_48, V6ADDR_2_48 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_49(V6ADDR_2_49, V6ADDR_2_49 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_50(V6ADDR_2_50, V6ADDR_2_50 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_51(V6ADDR_2_51, V6ADDR_2_51 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_52(V6ADDR_2_52, V6ADDR_2_52 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_127(V6ADDR_2_127, V6ADDR_2_127 + IPV6_SIZE);
+    vector<uint8_t> v6addr_3(V6ADDR_3, V6ADDR_3 + IPV6_SIZE);
+
+    // Exact address - match if given address matches stored address.
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/128"));
+    EXPECT_TRUE(acl1.matches(v6addr_2));
+    EXPECT_FALSE(acl1.matches(v6addr_2_127));
+    EXPECT_FALSE(acl1.matches(v6addr_2_52));
+    EXPECT_FALSE(acl1.matches(v6addr_2_51));
+    EXPECT_FALSE(acl1.matches(v6addr_2_50));
+    EXPECT_FALSE(acl1.matches(v6addr_2_49));
+    EXPECT_FALSE(acl1.matches(v6addr_2_48));
+    EXPECT_FALSE(acl1.matches(v6addr_3));
+
+    // Match to various prefixes.
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/127"));
+    EXPECT_TRUE(acl2.matches(v6addr_2));
+    EXPECT_TRUE(acl2.matches(v6addr_2_127));
+    EXPECT_FALSE(acl2.matches(v6addr_2_52));
+    EXPECT_FALSE(acl2.matches(v6addr_2_51));
+    EXPECT_FALSE(acl2.matches(v6addr_2_50));
+    EXPECT_FALSE(acl2.matches(v6addr_2_49));
+    EXPECT_FALSE(acl2.matches(v6addr_2_48));
+    EXPECT_FALSE(acl2.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/52"));
+    EXPECT_TRUE(acl3.matches(v6addr_2));
+    EXPECT_TRUE(acl3.matches(v6addr_2_127));
+    EXPECT_TRUE(acl3.matches(v6addr_2_52));
+    EXPECT_FALSE(acl3.matches(v6addr_2_51));
+    EXPECT_FALSE(acl3.matches(v6addr_2_50));
+    EXPECT_FALSE(acl3.matches(v6addr_2_49));
+    EXPECT_FALSE(acl3.matches(v6addr_2_48));
+    EXPECT_FALSE(acl3.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl4(string(V6ADDR_2_STRING) + string("/51"));
+    EXPECT_TRUE(acl4.matches(v6addr_2));
+    EXPECT_TRUE(acl4.matches(v6addr_2_127));
+    EXPECT_TRUE(acl4.matches(v6addr_2_52));
+    EXPECT_TRUE(acl4.matches(v6addr_2_51));
+    EXPECT_FALSE(acl4.matches(v6addr_2_50));
+    EXPECT_FALSE(acl4.matches(v6addr_2_49));
+    EXPECT_FALSE(acl4.matches(v6addr_2_48));
+    EXPECT_FALSE(acl4.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl5(string(V6ADDR_2_STRING) + string("/50"));
+    EXPECT_TRUE(acl5.matches(v6addr_2));
+    EXPECT_TRUE(acl5.matches(v6addr_2_127));
+    EXPECT_TRUE(acl5.matches(v6addr_2_52));
+    EXPECT_TRUE(acl5.matches(v6addr_2_51));
+    EXPECT_TRUE(acl5.matches(v6addr_2_50));
+    EXPECT_FALSE(acl5.matches(v6addr_2_49));
+    EXPECT_FALSE(acl5.matches(v6addr_2_48));
+    EXPECT_FALSE(acl5.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl6(string(V6ADDR_2_STRING) + string("/0"));
+    EXPECT_TRUE(acl6.matches(v6addr_2));
+    EXPECT_TRUE(acl6.matches(v6addr_2_127));
+    EXPECT_TRUE(acl6.matches(v6addr_2_52));
+    EXPECT_TRUE(acl6.matches(v6addr_2_51));
+    EXPECT_TRUE(acl6.matches(v6addr_2_50));
+    EXPECT_TRUE(acl6.matches(v6addr_2_49));
+    EXPECT_TRUE(acl6.matches(v6addr_2_48));
+    EXPECT_TRUE(acl6.matches(v6addr_3));
+
+    // Match on any address
+    IPCheck<GeneralAddress> acl7("any6");
+    EXPECT_TRUE(acl7.matches(v6addr_2));
+    EXPECT_TRUE(acl7.matches(v6addr_2_127));
+    EXPECT_TRUE(acl7.matches(v6addr_2_52));
+    EXPECT_TRUE(acl7.matches(v6addr_2_51));
+    EXPECT_TRUE(acl7.matches(v6addr_2_50));
+    EXPECT_TRUE(acl7.matches(v6addr_2_49));
+    EXPECT_TRUE(acl7.matches(v6addr_2_48));
+}
+
+// *** Mixed-mode tests - mainly to check that no exception is thrown ***
+
+TEST(IPCheck, MixedMode) {
+
+    // ACL has a V4 address specified, check against a V6 address.
+    IPCheck<GeneralAddress> acl1("192.0.2.255/24");
+    GeneralAddress test1(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+    EXPECT_NO_THROW(acl1.matches(test1));
+    EXPECT_FALSE(acl1.matches(test1));
+
+    // Now the reverse - the ACL is specified with a V6 address.
+    IPCheck<GeneralAddress> acl2(V6ADDR_2_STRING);
+    GeneralAddress test2(0x12345678);
+    EXPECT_FALSE(acl2.matches(test2));
+
+    // Ensure only a V4 address matches "any4".
+    IPCheck<GeneralAddress> acl3("any4");
+    EXPECT_FALSE(acl3.matches(test1));
+    EXPECT_TRUE(acl3.matches(test2));
+
+    // ... and check the reverse
+    IPCheck<GeneralAddress> acl4("any6");
+    EXPECT_TRUE(acl4.matches(test1));
+    EXPECT_FALSE(acl4.matches(test2));
+
+    // Check where the bit pattern of an IPv4 address matches that of an IPv6
+    // one.
+    IPCheck<GeneralAddress> acl5("2001:db8::/32");
+    GeneralAddress test5(0x20010db8);
+    EXPECT_FALSE(acl5.matches(test5));
+
+    // ... and where the reverse is true. (2001:db8 corresponds to 32.1.13.184).
+    IPCheck<GeneralAddress> acl6("32.1.13.184");
+    GeneralAddress test6(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+    EXPECT_FALSE(acl6.matches(test6));
+}
+} // Unnamed namespace
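
The pattern the GeneralAddress specialisation above demonstrates is the
intended way to hook IPCheck up to an application context. A condensed sketch
follows; MyContext is a hypothetical application type, everything else is
taken from the test above.

    #include <acl/ip_check.h>
    #include <stdint.h>
    #include <vector>

    // Hypothetical application context carrying the client address.
    struct MyContext {
        int family;                    // AF_INET or AF_INET6
        std::vector<uint8_t> address;  // 4 or 16 bytes, network byte order
    };

    namespace isc {
    namespace acl {
    // Hand the raw bytes and family to the protected compare() helper,
    // exactly as the GeneralAddress specialisation does.
    template <>
    bool IPCheck<MyContext>::matches(const MyContext& ctx) const {
        return (compare(&ctx.address[0], ctx.family));
    }
    }
    }

    // Hypothetical usage:
    bool clientAllowed(const MyContext& ctx) {
        isc::acl::IPCheck<MyContext> check("192.0.2.0/24");
        return (check.matches(ctx));
    }
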
diff --git a/src/lib/acl/tests/loader_test.cc b/src/lib/acl/tests/loader_test.cc
new file mode 100644
index 0000000..7dc088d
--- /dev/null
+++ b/src/lib/acl/tests/loader_test.cc
@@ -0,0 +1,371 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/loader.h>
+#include <string>
+#include <gtest/gtest.h>
+
+using namespace std;
+using namespace boost;
+
+namespace {
+
+// We don't use the EXPECT_THROW macro, as it doesn't allow us
+// to examine the exception. We want to check the element is stored
+// there as well.
+void testActionLoaderException(const string& JSON) {
+    SCOPED_TRACE("Should throw with input: " + JSON);
+    ConstElementPtr elem(el(JSON));
+    try {
+        defaultActionLoader(elem);
+        FAIL() << "It did not throw";
+    }
+    catch (const LoaderError& error) {
+        // Yes, we compare for pointer equality; that is enough, as it
+        // should return the exact instance of the JSON object
+        EXPECT_EQ(elem, error.element());
+    }
+}
+
+// Test the defaultActionLoader function
+TEST(LoaderHelpers, DefaultActionLoader) {
+    // First the three valid inputs
+    EXPECT_EQ(ACCEPT, defaultActionLoader(el("\"ACCEPT\"")));
+    EXPECT_EQ(REJECT, defaultActionLoader(el("\"REJECT\"")));
+    EXPECT_EQ(DROP, defaultActionLoader(el("\"DROP\"")));
+    // Now a few invalid ones
+    // String, but unknown one
+    testActionLoaderException("\"UNKNOWN\"");
+    testActionLoaderException("42");
+    testActionLoaderException("true");
+    testActionLoaderException("null");
+    testActionLoaderException("[]");
+    testActionLoaderException("{}");
+}
+
+class LoaderTest : public ::testing::Test {
+public:
+    LoaderTest() :
+        loader_(REJECT)
+    {}
+    Loader<Log> loader_;
+    Log log_;
+    // Some convenience functions to set up
+
+    // Create a NamedCreator, convert to shared pointer
+    shared_ptr<NamedCreator> namedCreator(const string& name,
+                                          bool abbreviatedList = true)
+    {
+        return (shared_ptr<NamedCreator>(new NamedCreator(name,
+                                                          abbreviatedList)));
+    }
+    // Create and add a NamedCreator
+    void addNamed(const string& name, bool abbreviatedList = true) {
+        EXPECT_NO_THROW(loader_.registerCreator(
+            namedCreator(name, abbreviatedList)));
+    }
+    template<class Result> shared_ptr<Result> loadCheckAny(const string&
+                                                               definition)
+    {
+        SCOPED_TRACE("Loading check " + definition);
+        shared_ptr<Check<Log> > loaded;
+        EXPECT_NO_THROW(loaded = loader_.loadCheck(el(definition)));
+        shared_ptr<Result> result(dynamic_pointer_cast<Result>(
+            loaded));
+        EXPECT_TRUE(result);
+        return (result);
+    }
+    // Load a check and convert it to named check to examine it
+    shared_ptr<NamedCheck> loadCheck(const string& definition) {
+        return (loadCheckAny<NamedCheck>(definition));
+    }
+    // The loadCheck throws an exception
+    void checkException(const string& JSON) {
+        SCOPED_TRACE("Loading check exception: " + JSON);
+        ConstElementPtr input(el(JSON));
+        // Not using EXPECT_THROW, we want to examine the exception
+        try {
+            loader_.loadCheck(input);
+            FAIL() << "Should have thrown";
+        }
+        catch (const LoaderError& e) {
+            // It should be the very same object, so we compare pointers
+            EXPECT_EQ(input, e.element());
+        }
+    }
+    // Insert the throw, throwcheck and logcheck checks into the loader
+    void aclSetup() {
+        try {
+            loader_.registerCreator(shared_ptr<ThrowCreator>(new
+                                                             ThrowCreator()));
+            loader_.registerCreator(shared_ptr<ThrowCheckCreator>(
+                new ThrowCheckCreator()));
+            loader_.registerCreator(shared_ptr<LogCreator>(new LogCreator()));
+        }
+        // We ignore this exception here, because it happens when we try to
+        // insert the creators multiple times. This is harmless.
+        catch (const LoaderError&) {}
+    }
+    // Create an ACL, run it, check its result, and check how many of the
+    // first log items it marked
+    //
+    // Works with preset names throw and logcheck
+    void aclRun(const string& JSON, BasicAction expectedResult,
+                size_t logged)
+    {
+        SCOPED_TRACE("Running ACL for " + JSON);
+        aclSetup();
+        shared_ptr<ACL<Log> > acl;
+        EXPECT_NO_THROW(acl = loader_.load(el(JSON)));
+        EXPECT_EQ(expectedResult, acl->execute(log_));
+        log_.checkFirst(logged);
+    }
+    // Check it throws an error when creating the ACL
+    void aclException(const string& JSON) {
+        SCOPED_TRACE("Trying to load bad " + JSON);
+        aclSetup();
+        EXPECT_THROW(loader_.load(el(JSON)), LoaderError);
+    }
+    // Check that the subexpression is NamedCheck with correct data
+    void isSubexprNamed(const CompoundCheck<Log>* compound, size_t index,
+                        const string& name, ConstElementPtr data)
+    {
+        if (index < compound->getSubexpressions().size()) {
+            const NamedCheck*
+                check(dynamic_cast<const NamedCheck*>(compound->
+                                                      getSubexpressions()
+                                                      [index]));
+            ASSERT_TRUE(check) << "The subexpression is of different type";
+            EXPECT_EQ(name, check->name_);
+            EXPECT_TRUE(data->equals(*check->data_));
+        }
+    }
+};
+
+// Test that it does not accept a duplicate creator
+TEST_F(LoaderTest, CreatorDuplicity) {
+    addNamed("name");
+    EXPECT_THROW(loader_.registerCreator(namedCreator("name")), LoaderError);
+}
+
+// Test that when it does not accept a duplicate, nothing is inserted
+TEST_F(LoaderTest, CreatorDuplicateUnchanged) {
+    addNamed("name1");
+    vector<string> names;
+    names.push_back("name2");
+    names.push_back("name1");
+    names.push_back("name3");
+    EXPECT_THROW(loader_.registerCreator(
+        shared_ptr<NamedCreator>(new NamedCreator(names))), LoaderError);
+    // It should now reject both name2 and name3 as not known
+    checkException("{\"name2\": null}");
+    checkException("{\"name3\": null}");
+}
+
+// Test that we can register a creator and load a check with the name
+TEST_F(LoaderTest, SimpleCheckLoad) {
+    addNamed("name");
+    shared_ptr<NamedCheck> check(loadCheck("{\"name\": 42}"));
+    EXPECT_EQ("name", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("42")));
+}
+
+// As above, but there are multiple creators registered within the loader
+TEST_F(LoaderTest, MultiCreatorCheckLoad) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<NamedCheck> check(loadCheck("{\"name2\": 42}"));
+    EXPECT_EQ("name2", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("42")));
+}
+
+// Similar to above, but there's a creator with multiple names
+TEST_F(LoaderTest, MultiNameCheckLoad) {
+    addNamed("name1");
+    vector<string> names;
+    names.push_back("name2");
+    names.push_back("name3");
+    EXPECT_NO_THROW(loader_.registerCreator(shared_ptr<NamedCreator>(
+        new NamedCreator(names))));
+    shared_ptr<NamedCheck> check(loadCheck("{\"name3\": 42}"));
+    EXPECT_EQ("name3", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("42")));
+}
+
+// Invalid format is rejected
+TEST_F(LoaderTest, InvalidFormatCheck) {
+    checkException("[]");
+    checkException("42");
+    checkException("\"hello\"");
+    checkException("null");
+}
+
+// Empty check is rejected
+TEST_F(LoaderTest, EmptyCheck) {
+    checkException("{}");
+}
+
+// The name isn't known
+TEST_F(LoaderTest, UnknownName) {
+    checkException("{\"unknown\": null}");
+}
+
+// Exception from the creator is propagated
+TEST_F(LoaderTest, CheckPropagate) {
+    loader_.registerCreator(shared_ptr<ThrowCreator>(new ThrowCreator()));
+    EXPECT_THROW(loader_.loadCheck(el("{\"throw\": null}")), TestCreatorError);
+}
+
+// The abbreviated form of check
+TEST_F(LoaderTest, AndAbbrev) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": 2}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        EXPECT_EQ(2, oper->getSubexpressions().size());
+        // Note: this test relies on the ordering in which map returns its
+        // elements, which is in the lexicographical order of the strings.
+        // This is not required from our interface, but is easier to write
+        // the test.
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        isSubexprNamed(&*oper, 1, "name2", el("2"));
+    }
+}
+
+// The abbreviated form of parameters
+TEST_F(LoaderTest, OrAbbrev) {
+    addNamed("name1");
+    shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AnyOfSpec, Log> >("{\"name1\": [1, 2]}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        EXPECT_EQ(2, oper->getSubexpressions().size());
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        isSubexprNamed(&*oper, 1, "name1", el("2"));
+    }
+}
+
+// Combined abbreviated form, both at once: one value is a scalar and the
+// other is a list, so the loader builds an AND with a nested OR inside.
+
+TEST_F(LoaderTest, BothAbbrev) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": [3, 4]}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        ASSERT_EQ(2, oper->getSubexpressions().size());
+        // Note: this test relies on the ordering in which map returns its
+        // elements, which is in the lexicographical order of the strings.
+        // This is not required from our interface, but is easier to write
+        // the test.
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        const LogicOperator<AnyOfSpec, Log>*
+            orOper(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>(
+            oper->getSubexpressions()[1]));
+        ASSERT_TRUE(orOper) << "Different type than AnyOf operator";
+        EXPECT_EQ(2, orOper->getSubexpressions().size());
+        isSubexprNamed(orOper, 0, "name2", el("3"));
+        isSubexprNamed(orOper, 1, "name2", el("4"));
+    }
+}
+
+// But this is not the abbreviated form; it should be passed directly to the
+// creator
+TEST_F(LoaderTest, ListCheck) {
+    addNamed("name1", false);
+    shared_ptr<NamedCheck> check(loadCheck("{\"name1\": [1, 2]}"));
+    EXPECT_EQ("name1", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("[1, 2]")));
+}
+
+// Check the action key is ignored as it should be
+TEST_F(LoaderTest, CheckNoAction) {
+    addNamed("name1");
+    shared_ptr<NamedCheck> check(loadCheck("{\"name1\": 1, \"action\": 2}"));
+    EXPECT_EQ("name1", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("1")));
+}
+
+// An empty ACL can be created and run, providing the default action
+TEST_F(LoaderTest, EmptyACL) {
+    aclRun("[]", REJECT, 0);
+}
+
+// We can create a simple ACL, which will return the correct default
+// action
+TEST_F(LoaderTest, NoMatchACL) {
+    aclRun("[{\"logcheck\": [0, false], \"action\": \"ACCEPT\"}]",
+           REJECT, 1);
+}
+
+// We can create a more complicated ACL; it will match at the second
+// check
+TEST_F(LoaderTest, MatchACL) {
+    aclRun("["
+           "  {\"logcheck\": [0, false], \"action\": \"DROP\"},"
+           "  {\"logcheck\": [1, true], \"action\": \"ACCEPT\"}"
+           "]", ACCEPT, 2);
+}
+
+// ACL without a check (matches unconditionally)
+// We add another check after it, to make sure it is really not run
+TEST_F(LoaderTest, NoCheckACL) {
+    aclRun("["
+           "  {\"action\": \"DROP\"},"
+           "  {\"throwcheck\": 1, \"action\": \"ACCEPT\"}"
+           "]", DROP, 0);
+}
+
+// Malformed things are rejected
+TEST_F(LoaderTest, InvalidACLFormat) {
+    // Not a list
+    aclException("{}");
+    aclException("42");
+    aclException("true");
+    aclException("null");
+    aclException("\"hello\"");
+    // Malformed element
+    aclException("[42]");
+    aclException("[\"hello\"]");
+    aclException("[[]]");
+    aclException("[true]");
+    aclException("[null]");
+}
+
+// If there's no action keyword, it is rejected
+TEST_F(LoaderTest, NoAction) {
+    aclException("[{}]");
+    aclException("[{\"logcheck\": [0, true]}]");
+}
+
+// Exceptions from check creation are propagated
+TEST_F(LoaderTest, ACLPropagate) {
+    aclSetup();
+    EXPECT_THROW(loader_.load(el("[{\"action\": \"ACCEPT\", \"throw\": 1}]")),
+                 TestCreatorError);
+
+}
+
+}
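
Taken together, the fixture above exercises the workflow the loader is meant
for: register creators, load an ACL from JSON, execute it against a context.
A sketch follows, reusing the test-only Log context and LogCreator from
creators.h; a real application would supply its own context type and creators.

    #include "creators.h"   // test-only: Log, LogCreator, el()
    #include <acl/loader.h>

    void loaderWorkflowSketch() {
        // REJECT is the default action when no entry matches.
        Loader<Log> loader(REJECT);
        loader.registerCreator(
            shared_ptr<LogCreator>(new LogCreator()));

        // Each list entry is one check plus its "action".
        shared_ptr<ACL<Log> > acl = loader.load(el(
            "[{\"logcheck\": [0, true], \"action\": \"ACCEPT\"}]"));

        Log log;
        BasicAction action = acl->execute(log);
        // The first (and only) entry matches, so action is ACCEPT and
        // log cell 0 was marked.
    }
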
diff --git a/src/lib/acl/tests/logcheck.h b/src/lib/acl/tests/logcheck.h
new file mode 100644
index 0000000..776ff53
--- /dev/null
+++ b/src/lib/acl/tests/logcheck.h
@@ -0,0 +1,91 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LOGCHECK_H
+#define LOGCHECK_H
+
+#include <gtest/gtest.h>
+#include <acl/acl.h>
+#include <cassert>
+
+// This is not a public header; it is used only inside the tests. Therefore
+// we lower the standards a bit and use an anonymous namespace in the header
+// and "using", just for convenience. This is just to share a little bit of
+// code between multiple tests.
+using namespace isc::acl;
+using boost::shared_ptr;
+
+namespace {
+
+// This is an arbitrary guess of the size for the log. If it's too small for
+// your test, just make it bigger.
+const size_t LOG_SIZE = 10;
+
+// This will remember which checks did run already.
+struct Log {
+    // The actual log cells; run[i] is true if the i-th check did run
+    mutable bool run[LOG_SIZE];
+    Log() {
+        // Nothing run yet
+        for (size_t i(0); i < LOG_SIZE; ++i) {
+            run[i] = false;
+        }
+    }
+    // Checks that the first 'amount' checks did run and the rest didn't.
+    void checkFirst(size_t amount) const {
+        ASSERT_LE(amount, LOG_SIZE) << "Wrong test: amount bigger than size "
+            "of log";
+        {
+            SCOPED_TRACE("Checking that the first amount of checks did run");
+            for (size_t i(0); i < amount; ++i) {
+                EXPECT_TRUE(run[i]) << "Check #" << i << " did not run.";
+            }
+        }
+
+        {
+            SCOPED_TRACE("Checking that the rest did not run");
+            for (size_t i(amount); i < LOG_SIZE; ++i) {
+                EXPECT_FALSE(run[i]) << "Check #" << i << " did run.";
+            }
+        }
+    }
+};
+
+// This returns true or false every time, no matter what is passed to it.
+// But it logs that it did run.
+class ConstCheck : public Check<Log> {
+public:
+    ConstCheck(bool accepts, size_t logNum) :
+        logNum_(logNum),
+        accepts_(accepts)
+    {
+        assert(logNum < LOG_SIZE); // If this fails, the LOG_SIZE is too small
+    }
+    virtual bool matches(const Log& log) const {
+        /*
+         * This is abuse of the context. It is designed to carry the
+         * information to check, not to modify it. However, this is the
+         * easiest way to do the test, so we go against the design.
+         */
+        log.run[logNum_] = true;
+        return (accepts_);
+    }
+private:
+    size_t logNum_;
+    bool accepts_;
+};
+
+}
+
+#endif
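
For reference, the helpers above are typically used like this (a sketch only;
the TEST name is illustrative):

    #include "logcheck.h"

    TEST(LogcheckSketch, constCheckMarksLog) {
        Log log;
        ConstCheck accept(true, 0);   // always matches, marks cell 0
        ConstCheck reject(false, 1);  // never matches, marks cell 1
        EXPECT_TRUE(accept.matches(log));
        EXPECT_FALSE(reject.matches(log));
        // Cells 0 and 1 were marked; nothing beyond them ran.
        log.checkFirst(2);
    }
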
diff --git a/src/lib/acl/tests/logic_check_test.cc b/src/lib/acl/tests/logic_check_test.cc
new file mode 100644
index 0000000..b165ff0
--- /dev/null
+++ b/src/lib/acl/tests/logic_check_test.cc
@@ -0,0 +1,228 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/logic_check.h>
+#include <typeinfo>
+
+using namespace isc::acl;
+
+namespace {
+
+// Test the defs in AnyOfSpec
+TEST(LogicOperators, AnyOfSpec) {
+    EXPECT_FALSE(AnyOfSpec::start());
+    EXPECT_FALSE(AnyOfSpec::terminate(false));
+    EXPECT_TRUE(AnyOfSpec::terminate(true));
+}
+
+// Test the defs in AllOfSpec
+TEST(LogicOperators, AllOfSpec) {
+    EXPECT_TRUE(AllOfSpec::start());
+    EXPECT_TRUE(AllOfSpec::terminate(false));
+    EXPECT_FALSE(AllOfSpec::terminate(true));
+}
+
+// Generic test of one check
+template<typename Mode>
+void
+testCheck(bool emptyResult) {
+    // It can be created
+    LogicOperator<Mode, Log> oper;
+    // It is empty by default
+    EXPECT_EQ(0, oper.getSubexpressions().size());
+    // And returns true, as all 0 of the subexpressions return true
+    Log log;
+    EXPECT_EQ(emptyResult, oper.matches(log));
+    log.checkFirst(0);
+    // Fill it with some subexpressions
+    typedef shared_ptr<ConstCheck> CheckPtr;
+    oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 0)));
+    oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 1)));
+    // Check what happens when only the default-valued ones are there
+    EXPECT_EQ(2, oper.getSubexpressions().size());
+    EXPECT_EQ(emptyResult, oper.matches(log));
+    log.checkFirst(2);
+    oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 2)));
+    oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 3)));
+    // They are listed there
+    EXPECT_EQ(4, oper.getSubexpressions().size());
+    // Now the third one terminates the evaluation, so the first three will
+    // run but the fourth won't
+    EXPECT_EQ(!emptyResult, oper.matches(log));
+    log.checkFirst(3);
+}
+
+TEST(LogicOperators, AllOf) {
+    testCheck<AllOfSpec>(true);
+}
+
+TEST(LogicOperators, AnyOf) {
+    testCheck<AnyOfSpec>(false);
+}
+
+// Fixture for the tests of the creators
+class LogicCreatorTest : public ::testing::Test {
+private:
+    typedef shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
+public:
+    // Register some creators, both tested ones and some auxiliary ones for
+    // help
+    LogicCreatorTest():
+        loader_(REJECT)
+    {
+        loader_.registerCreator(CreatorPtr(new
+            LogicCreator<AnyOfSpec, Log>("ANY")));
+        loader_.registerCreator(CreatorPtr(new
+            LogicCreator<AllOfSpec, Log>("ALL")));
+        loader_.registerCreator(CreatorPtr(new ThrowCreator));
+        loader_.registerCreator(CreatorPtr(new LogCreator));
+    }
+    // To mark which parts of the check did run
+    Log log_;
+    // The loader
+    Loader<Log> loader_;
+    // Some convenience shortcut names
+    typedef LogicOperator<AnyOfSpec, Log> AnyOf;
+    typedef LogicOperator<AllOfSpec, Log> AllOf;
+    typedef shared_ptr<AnyOf> AnyOfPtr;
+    typedef shared_ptr<AllOf> AllOfPtr;
+    // Loads the JSON as a check and tries to convert it to the given check
+    // subclass
+    template<typename Result> shared_ptr<Result> load(const string& JSON) {
+        shared_ptr<Check<Log> > result;
+        EXPECT_NO_THROW(result = loader_.loadCheck(el(JSON)));
+        /*
+         * Optimally, we would use a dynamic_pointer_cast here to both
+         * convert the pointer and to check the type is correct. However,
+         * clang++ seems to be confused by templates and creates two typeids
+         * for the same templated type (even with the same parameters),
+         * therefore considering the types different, even if they are the same.
+         * This leads to a false alarm in the test. Luckily, it generates the
+         * same name for both typeids, so we use them instead (which is enough
+         * to test the correct type of Check is returned). Then we can safely
+         * cast statically, as we don't use any kind of nasty things like
+         * multiple inheritance.
+         */
+        EXPECT_STREQ(typeid(Result).name(), typeid(*result.get()).name());
+        shared_ptr<Result>
+            resultConverted(static_pointer_cast<Result>(result));
+        EXPECT_NE(shared_ptr<Result>(), resultConverted);
+        return (resultConverted);
+    }
+};
+
+// Test it can load empty ones
+TEST_F(LogicCreatorTest, empty) {
+    AnyOfPtr emptyAny(load<AnyOf>("{\"ANY\": []}"));
+    EXPECT_EQ(0, emptyAny->getSubexpressions().size());
+    AllOfPtr emptyAll(load<AllOf>("{\"ALL\": []}"));
+    EXPECT_EQ(0, emptyAll->getSubexpressions().size());
+}
+
+// Test it rejects invalid inputs (not a list as a parameter)
+TEST_F(LogicCreatorTest, invalid) {
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": null}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": {}}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": true}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": 42}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": \"hello\"}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": null}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": {}}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": true}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": 42}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": \"hello\"}")), LoaderError);
+}
+
+// Exceptions from subexpression creation aren't caught
+TEST_F(LogicCreatorTest, propagate) {
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": [{\"throw\": null}]}")),
+                 TestCreatorError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": [{\"throw\": null}]}")),
+                 TestCreatorError);
+}
+
+// We can create a more complex ANY check and run it correctly
+TEST_F(LogicCreatorTest, anyRun) {
+    AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+                             "    {\"logcheck\": [0, false]},"
+                             "    {\"logcheck\": [1, true]},"
+                             "    {\"logcheck\": [2, true]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_TRUE(any->matches(log_));
+    log_.checkFirst(2);
+}
+
+// We can create a more complex ALL check and run it correctly
+TEST_F(LogicCreatorTest, allRun) {
+    AllOfPtr any(load<AllOf>("{\"ALL\": ["
+                             "    {\"logcheck\": [0, true]},"
+                             "    {\"logcheck\": [1, false]},"
+                             "    {\"logcheck\": [2, false]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_FALSE(any->matches(log_));
+    log_.checkFirst(2);
+}
+
+// Or is able to return false
+TEST_F(LogicCreatorTest, anyFalse) {
+    AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+                             "    {\"logcheck\": [0, false]},"
+                             "    {\"logcheck\": [1, false]},"
+                             "    {\"logcheck\": [2, false]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_FALSE(any->matches(log_));
+    log_.checkFirst(3);
+}
+
+// And is able to return true
+TEST_F(LogicCreatorTest, andTrue) {
+    AllOfPtr all(load<AllOf>("{\"ALL\": ["
+                             "    {\"logcheck\": [0, true]},"
+                             "    {\"logcheck\": [1, true]},"
+                             "    {\"logcheck\": [2, true]}"
+                             "]}"));
+    EXPECT_EQ(3, all->getSubexpressions().size());
+    EXPECT_TRUE(all->matches(log_));
+    log_.checkFirst(3);
+}
+
+// We can nest them together
+TEST_F(LogicCreatorTest, nested) {
+    AllOfPtr all(load<AllOf>("{\"ALL\": ["
+                             "    {\"ANY\": ["
+                             "        {\"logcheck\": [0, true]},"
+                             "        {\"logcheck\": [2, true]},"
+                             "    ]},"
+                             "    {\"logcheck\": [1, false]}"
+                             "]}"));
+    EXPECT_EQ(2, all->getSubexpressions().size());
+    /*
+     * This has the same problem as load function above, and we use the
+     * same solution here.
+     */
+    ASSERT_STREQ(typeid(LogicOperator<AnyOfSpec, Log>).name(),
+                 typeid(*all->getSubexpressions()[0]).name());
+    const LogicOperator<AnyOfSpec, Log>*
+        any(static_cast<const LogicOperator<AnyOfSpec, Log>*>
+            (all->getSubexpressions()[0]));
+    EXPECT_EQ(2, any->getSubexpressions().size());
+    EXPECT_FALSE(all->matches(log_));
+    log_.checkFirst(2);
+}
+
+}
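
The short-circuit behaviour verified by testCheck() above can also be seen by
assembling an operator by hand. A sketch using only ConstCheck and Log from
logcheck.h (the TEST name is illustrative):

    #include "logcheck.h"
    #include <acl/logic_check.h>

    TEST(LogicOperatorsSketch, allOfShortCircuits) {
        LogicOperator<AllOfSpec, Log> all;
        typedef shared_ptr<ConstCheck> CheckPtr;
        all.addSubexpression(CheckPtr(new ConstCheck(true, 0)));
        all.addSubexpression(CheckPtr(new ConstCheck(false, 1)));
        all.addSubexpression(CheckPtr(new ConstCheck(true, 2)));

        Log log;
        // ALL terminates as soon as a subexpression returns false, so the
        // third check never runs.
        EXPECT_FALSE(all.matches(log));
        log.checkFirst(2);
    }
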
diff --git a/src/lib/acl/tests/run_unittests.cc b/src/lib/acl/tests/run_unittests.cc
index 61df6cf..8dc59a2 100644
--- a/src/lib/acl/tests/run_unittests.cc
+++ b/src/lib/acl/tests/run_unittests.cc
@@ -13,11 +13,12 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <gtest/gtest.h>
+#include <log/logger_support.h>
 #include <util/unittests/run_all.h>
 
 int
 main(int argc, char* argv[]) {
     ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
     return (isc::util::unittests::run_all());
 }
-
diff --git a/src/lib/asiodns/Makefile.am b/src/lib/asiodns/Makefile.am
index 2a6c3ac..2d246ef 100644
--- a/src/lib/asiodns/Makefile.am
+++ b/src/lib/asiodns/Makefile.am
@@ -8,13 +8,13 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-CLEANFILES = *.gcno *.gcda asiodef.h asiodef.cc
+CLEANFILES = *.gcno *.gcda asiodns_messages.h asiodns_messages.cc
 
 # Define rule to build logging source files from message file
-asiodef.h asiodef.cc: asiodef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodef.mes
+asiodns_messages.h asiodns_messages.cc: asiodns_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodns_messages.mes
 
-BUILT_SOURCES = asiodef.h asiodef.cc
+BUILT_SOURCES = asiodns_messages.h asiodns_messages.cc
 
 lib_LTLIBRARIES = libasiodns.la
 libasiodns_la_SOURCES = dns_answer.h
@@ -26,9 +26,9 @@ libasiodns_la_SOURCES += tcp_server.cc tcp_server.h
 libasiodns_la_SOURCES += udp_server.cc udp_server.h
 libasiodns_la_SOURCES += io_fetch.cc io_fetch.h
 
-nodist_libasiodns_la_SOURCES = asiodef.cc asiodef.h
+nodist_libasiodns_la_SOURCES = asiodns_messages.cc asiodns_messages.h
 
-EXTRA_DIST = asiodef.mes
+EXTRA_DIST = asiodns_messages.mes
 
 # Note: the ordering matters: -Wno-... must follow -Wextra (defined in
 # B10_CXXFLAGS)
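
The renamed rule above still just runs the message compiler over the .mes
file; the header it generates is what io_fetch.cc includes further down in
this change.  A minimal sketch of the shape such a generated
asiodns_messages.h is assumed to take (the real file is produced by
src/lib/log/compiler/message and may differ in detail):

    // Assumed shape of the generated header (sketch only, not the real output).
    #include <log/message_types.h>

    namespace isc {
    namespace asiodns {

    // One MessageID constant per '%' entry in asiodns_messages.mes,
    // e.g. the identifiers used by io_fetch.cc below:
    extern const isc::log::MessageID ASIODNS_FETCH_COMPLETED;
    extern const isc::log::MessageID ASIODNS_OPEN_SOCKET;
    extern const isc::log::MessageID ASIODNS_READ_TIMEOUT;

    } // namespace asiodns
    } // namespace isc
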
diff --git a/src/lib/asiodns/asiodef.mes b/src/lib/asiodns/asiodef.mes
deleted file mode 100644
index 3f2e80c..0000000
--- a/src/lib/asiodns/asiodef.mes
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX ASIODNS_
-$NAMESPACE isc::asiodns
-
-% FETCHCOMP   upstream fetch to %1(%2) has now completed
-A debug message, this records the the upstream fetch (a query made by the
-resolver on behalf of its client) to the specified address has completed.
-
-% FETCHSTOP   upstream fetch to %1(%2) has been stopped
-An external component has requested the halting of an upstream fetch.  This
-is an allowed operation, and the message should only appear if debug is
-enabled.
-
-% OPENSOCK    error %1 opening %2 socket to %3(%4)
-The asynchronous I/O code encountered an error when trying to open a socket
-of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
-message.
-
-% RECVSOCK    error %1 reading %2 data from %3(%4)
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-
-% SENDSOCK    error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-
-% RECVTMO     receive timeout while waiting for data from %1(%2)
-An upstream fetch from the specified address timed out.  This may happen for
-any number of reasons and is most probably a problem at the remote server
-or a problem on the network.  The message will only appear if debug is
-enabled.
-
-% UNKORIGIN  unknown origin for ASIO error code %1 (protocol: %2, address %3)
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-
-% UNKRESULT  unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message).  This message should
-not appear and may indicate an internal error.  Please enter a bug report.
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
new file mode 100644
index 0000000..3e11ede
--- /dev/null
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -0,0 +1,56 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::asiodns
+
+% ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+
+% ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+
+% ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+
+% ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+
+% ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+
+% ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message).  Please submit a bug report.
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 4b2edf9..31b5f50 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -41,7 +41,7 @@
 #include <log/logger.h>
 #include <log/macros.h>
 
-#include <asiodns/asiodef.h>
+#include <asiodns/asiodns_messages.h>
 #include <asiodns/io_fetch.h>
 
 #include <util/buffer.h>
@@ -158,7 +158,7 @@ struct IOFetchData {
         stopped(false),
         timeout(wait),
         packet(false),
-        origin(ASIODNS_UNKORIGIN),
+        origin(ASIODNS_UNKNOWN_ORIGIN),
         staging(),
         qid(QidGenerator::getInstance().generateQid())
     {}
@@ -280,7 +280,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
 
         // Open a connection to the target system.  For speed, if the operation
         // is synchronous (i.e. UDP operation) we bypass the yield.
-        data_->origin = ASIODNS_OPENSOCK;
+        data_->origin = ASIODNS_OPEN_SOCKET;
         if (data_->socket->isOpenSynchronous()) {
             data_->socket->open(data_->remote_snd.get(), *this);
         } else {
@@ -290,7 +290,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
         do {
             // Begin an asynchronous send, and then yield.  When the send completes,
             // we will resume immediately after this point.
-            data_->origin = ASIODNS_SENDSOCK;
+            data_->origin = ASIODNS_SEND_DATA;
             CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
                 data_->msgbuf->getLength(), data_->remote_snd.get(), *this);
     
@@ -313,7 +313,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
             // received all the data before copying it back to the user's buffer.
             // And we want to minimise the amount of copying...
     
-            data_->origin = ASIODNS_RECVSOCK;
+            data_->origin = ASIODNS_READ_DATA;
             data_->cumulative = 0;          // No data yet received
             data_->offset = 0;              // First data into start of buffer
             data_->received->clear();       // Clear the receive buffer
@@ -329,7 +329,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
 
         // Finished with this socket, so close it.  This will not generate an
         // I/O error, but reset the origin to unknown in case we change this.
-        data_->origin = ASIODNS_UNKORIGIN;
+        data_->origin = ASIODNS_UNKNOWN_ORIGIN;
         data_->socket->close();
 
         /// We are done
@@ -367,13 +367,13 @@ IOFetch::stop(Result result) {
         data_->stopped = true;
         switch (result) {
             case TIME_OUT:
-                LOG_DEBUG(logger, DBG_COMMON, ASIODNS_RECVTMO).
+                LOG_DEBUG(logger, DBG_COMMON, ASIODNS_READ_TIMEOUT).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
                 break;
 
             case SUCCESS:
-                LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCHCOMP).
+                LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCH_COMPLETED).
                     arg(data_->remote_rcv->getAddress().toText()).
                     arg(data_->remote_rcv->getPort());
                 break;
@@ -382,13 +382,13 @@ IOFetch::stop(Result result) {
                 // Fetch has been stopped for some other reason.  This is
                 // allowed but as it is unusual it is logged, but with a lower
                 // debug level than a timeout (which is totally normal).
-                LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCHSTOP).
+                LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCH_STOPPED).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
                 break;
 
             default:
-                LOG_ERROR(logger, ASIODNS_UNKRESULT).
+                LOG_ERROR(logger, ASIODNS_UNKNOWN_RESULT).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
         }
@@ -412,10 +412,10 @@ IOFetch::stop(Result result) {
 void IOFetch::logIOFailure(asio::error_code ec) {
 
     // Should only get here with a known error code.
-    assert((data_->origin == ASIODNS_OPENSOCK) ||
-           (data_->origin == ASIODNS_SENDSOCK) ||
-           (data_->origin == ASIODNS_RECVSOCK) ||
-           (data_->origin == ASIODNS_UNKORIGIN));
+    assert((data_->origin == ASIODNS_OPEN_SOCKET) ||
+           (data_->origin == ASIODNS_SEND_DATA) ||
+           (data_->origin == ASIODNS_READ_DATA) ||
+           (data_->origin == ASIODNS_UNKNOWN_ORIGIN));
 
     static const char* PROTOCOL[2] = {"TCP", "UDP"};
     LOG_ERROR(logger, data_->origin).arg(ec.value()).
diff --git a/src/lib/cc/data.h b/src/lib/cc/data.h
index 0a363f4..5c731e6 100644
--- a/src/lib/cc/data.h
+++ b/src/lib/cc/data.h
@@ -479,7 +479,7 @@ public:
         return (true);
     }
     using Element::setValue;
-    bool setValue(std::map<std::string, ConstElementPtr>& v) {
+    bool setValue(const std::map<std::string, ConstElementPtr>& v) {
         m = v;
         return (true);
     }
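
The switch to a const reference here is what lets a caller hand setValue()
the map obtained from a ConstElementPtr without making a mutable copy first;
getRelatedLoggers() in ccsession.cc (later in this diff) relies on exactly
that for its shallow map copy.  A minimal sketch of the pattern (the helper
name renameLogger is made up for illustration, and the include path is an
assumption):

    // Sketch: shallow-copy a map element and change one key, the way
    // getRelatedLoggers() renames a '*' logger entry.
    #include <string>
    #include <cc/data.h>

    using namespace isc::data;

    ElementPtr renameLogger(ConstElementPtr logger, const std::string& new_name) {
        ElementPtr copy(Element::createMap());
        // mapValue() yields a const map, which the new
        // setValue(const std::map<std::string, ConstElementPtr>&) accepts.
        copy->setValue(logger->mapValue());
        copy->set("name", Element::create(new_name));
        return (copy);
    }
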
diff --git a/src/lib/config/Makefile.am b/src/lib/config/Makefile.am
index 52337ad..500ff12 100644
--- a/src/lib/config/Makefile.am
+++ b/src/lib/config/Makefile.am
@@ -6,10 +6,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 
 # Define rule to build logging source files from message file
-configdef.h configdef.cc: configdef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/configdef.mes
+config_messages.h config_messages.cc: config_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/config_messages.mes
 
-BUILT_SOURCES = configdef.h configdef.cc
+BUILT_SOURCES = config_messages.h config_messages.cc
 
 lib_LTLIBRARIES = libcfgclient.la
 libcfgclient_la_SOURCES = config_data.h config_data.cc
@@ -17,9 +17,9 @@ libcfgclient_la_SOURCES += module_spec.h module_spec.cc
 libcfgclient_la_SOURCES += ccsession.cc ccsession.h
 libcfgclient_la_SOURCES += config_log.h config_log.cc
 
-nodist_libcfgclient_la_SOURCES  = configdef.h configdef.cc
+nodist_libcfgclient_la_SOURCES  = config_messages.h config_messages.cc
 
 # The message file should be in the distribution.
-EXTRA_DIST = configdef.mes
+EXTRA_DIST = config_messages.mes
 
-CLEANFILES = *.gcno *.gcda configdef.h configdef.cc
+CLEANFILES = *.gcno *.gcda config_messages.h config_messages.cc
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index 857de63..6b094ec 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -23,6 +23,7 @@
 #include <fstream>
 #include <sstream>
 #include <cerrno>
+#include <set>
 
 #include <boost/bind.hpp>
 #include <boost/foreach.hpp>
@@ -38,6 +39,7 @@
 #include <log/logger_support.h>
 #include <log/logger_specification.h>
 #include <log/logger_manager.h>
+#include <log/logger_name.h>
 
 using namespace std;
 
@@ -213,7 +215,8 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
                 ConstElementPtr logger,
                 const ConfigData& config_data)
 {
-    const std::string lname = logger->get("name")->stringValue();
+    std::string lname = logger->get("name")->stringValue();
+
     ConstElementPtr severity_el = getValueOrDefault(logger,
                                       "severity", config_data,
                                       "loggers/severity");
@@ -246,15 +249,62 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
 
 } // end anonymous namespace
 
+
+ConstElementPtr
+getRelatedLoggers(ConstElementPtr loggers) {
+    // Keep a list of names for easier lookup later
+    std::set<std::string> our_names;
+    const std::string& root_name = isc::log::getRootLoggerName();
+
+    ElementPtr result = isc::data::Element::createList();
+
+    BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+        const std::string cur_name = cur_logger->get("name")->stringValue();
+        if (cur_name == root_name || cur_name.find(root_name + ".") == 0) {
+            our_names.insert(cur_name);
+            result->add(cur_logger);
+        }
+    }
+
+    // now find the * names
+    BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+        std::string cur_name = cur_logger->get("name")->stringValue();
+        // if name is '*', or starts with '*.', replace * with root
+        // logger name
+        if (cur_name == "*" || (cur_name.length() > 1 &&
+            cur_name[0] == '*' && cur_name[1] == '.')) {
+
+            cur_name = root_name + cur_name.substr(1);
+            // now add it to the result list, but only if a logger with
+            // that name was not configured explicitly
+            if (our_names.find(cur_name) == our_names.end()) {
+                // we substitute the name here already, but as
+                // we are dealing with consts, we copy the data
+                ElementPtr new_logger(Element::createMap());
+                // since we'll only be updating one first-level element,
+                // and we return as const again, a shallow map copy is
+                // enough
+                new_logger->setValue(cur_logger->mapValue());
+                new_logger->set("name", Element::create(cur_name));
+                result->add(new_logger);
+            }
+        }
+    }
+    return (result);
+}
+
 void
-my_logconfig_handler(const std::string&n, ConstElementPtr new_config, const ConfigData& config_data) {
+default_logconfig_handler(const std::string& module_name,
+                          ConstElementPtr new_config,
+                          const ConfigData& config_data) {
     config_data.getModuleSpec().validateConfig(new_config, true);
 
     std::vector<isc::log::LoggerSpecification> specs;
 
     if (new_config->contains("loggers")) {
+        ConstElementPtr loggers = getRelatedLoggers(new_config->get("loggers"));
         BOOST_FOREACH(ConstElementPtr logger,
-                      new_config->get("loggers")->listValue()) {
+                      loggers->listValue()) {
             readLoggersConf(specs, logger, config_data);
         }
     }
@@ -272,7 +322,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
     // this file should be declared in a @something@ directive
     file.open(filename.c_str());
     if (!file) {
-        LOG_ERROR(config_logger, CONFIG_FOPEN_ERR).arg(filename).arg(strerror(errno));
+        LOG_ERROR(config_logger, CONFIG_OPEN_FAIL).arg(filename).arg(strerror(errno));
         isc_throw(CCSessionInitError, strerror(errno));
     }
 
@@ -282,7 +332,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
         LOG_ERROR(config_logger, CONFIG_JSON_PARSE).arg(filename).arg(pe.what());
         isc_throw(CCSessionInitError, pe.what());
     } catch (const ModuleSpecError& dde) {
-        LOG_ERROR(config_logger, CONFIG_MODULE_SPEC).arg(filename).arg(dde.what());
+        LOG_ERROR(config_logger, CONFIG_MOD_SPEC_FORMAT).arg(filename).arg(dde.what());
         isc_throw(CCSessionInitError, dde.what());
     }
     file.close();
@@ -332,7 +382,7 @@ ModuleCCSession::ModuleCCSession(
     int rcode;
     ConstElementPtr err = parseAnswer(rcode, answer);
     if (rcode != 0) {
-        LOG_ERROR(config_logger, CONFIG_MANAGER_MOD_SPEC).arg(answer->str());
+        LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
         isc_throw(CCSessionInitError, answer->str());
     }
     
@@ -346,14 +396,14 @@ ModuleCCSession::ModuleCCSession(
         if (rcode == 0) {
             handleConfigUpdate(new_config);
         } else {
-            LOG_ERROR(config_logger, CONFIG_MANAGER_CONFIG).arg(new_config->str());
+            LOG_ERROR(config_logger, CONFIG_GET_FAIL).arg(new_config->str());
             isc_throw(CCSessionInitError, answer->str());
         }
     }
 
     // Keep track of logging settings automatically
     if (handle_logging) {
-        addRemoteConfig("Logging", my_logconfig_handler, false);
+        addRemoteConfig("Logging", default_logconfig_handler, false);
     }
 
     if (start_immediately) {
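
Right above, the constructor registers default_logconfig_handler
automatically when handle_logging is true.  A module created without that
flag could, in principle, opt in later through the same public call (sketch
only; mccs is assumed to be an existing ModuleCCSession):

    // Same registration the constructor performs when handle_logging is true.
    mccs.addRemoteConfig("Logging", isc::config::default_logconfig_handler, false);
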
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index 53aab78..7dc34ba 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -354,8 +354,60 @@ private:
     ModuleSpec fetchRemoteSpec(const std::string& module, bool is_filename);
 };
 
-}
-}
+/// \brief Default handler for logging config updates
+///
+/// When a ModuleCCSession is initialized with handle_logging set to true,
+/// this callback will be used to update the logger when a configuration
+/// change comes in.
+///
+/// This function updates the (global) loggers by initializing a
+/// LoggerManager and passing the settings as specified in the given
+/// configuration update.
+///
+/// \param module_name The name of the module
+/// \param new_config The modified configuration values
+/// \param config_data The full config data for the (remote) logging
+///                    module.
+void
+default_logconfig_handler(const std::string& module_name,
+                          isc::data::ConstElementPtr new_config,
+                          const ConfigData& config_data);
+
+
+/// \brief Returns the loggers related to this module
+///
+/// This function does two things:
+/// - it drops the configuration parts for loggers for other modules
+/// - it replaces the '*' in the name of the loggers by the name of
+///   this module, but *only* if the expanded name is not configured
+///   explicitly
+///
+/// Examples: if this is the module b10-resolver,
+/// for the config names ['*', 'b10-auth']
+/// the '*' is replaced with 'b10-resolver', and this logger is used;
+/// 'b10-auth' is ignored here (it is, of course, only relevant in the
+/// b10-auth module).
+///
+/// For ['*', 'b10-resolver']
+/// The '*' is ignored, and only 'b10-resolver' is used.
+///
+/// For ['*.reslib', 'b10-resolver']
+/// Or ['b10-resolver.reslib', '*']
+/// Both are used, where the * will be expanded to b10-resolver
+///
+/// \note This is a public function at this time, but mostly for
+/// the purposes of testing. Once we can directly test what loggers
+/// are running, this function may be moved to the unnamed namespace
+///
+/// \param loggers the original 'loggers' config list
+/// \return ListElement containing only loggers relevant for this
+///         module, where * is replaced by the root logger name
+isc::data::ConstElementPtr
+getRelatedLoggers(isc::data::ConstElementPtr loggers);
+
+} // namespace config
+
+} // namespace isc
 #endif // __CCSESSION_H
 
 // Local Variables:
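
The examples in the getRelatedLoggers() documentation translate fairly
directly into code.  A minimal sketch, assuming the root logger name has been
set to "b10-resolver" as in those examples (Element::fromJSON() and
getRelatedLoggers() are the same calls exercised by the new unit tests later
in this change):

    // Sketch only: wildcard expansion as documented above.
    isc::data::ConstElementPtr loggers = isc::data::Element::fromJSON(
        "[{\"name\": \"*\", \"severity\": \"DEBUG\"},"
        " {\"name\": \"b10-auth\"}]");
    isc::data::ConstElementPtr mine = isc::config::getRelatedLoggers(loggers);
    // With root logger "b10-resolver", 'mine' now holds
    //   [{"name": "b10-resolver", "severity": "DEBUG"}]
    // and the "b10-auth" entry has been dropped for this module.
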
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 22e5a5c..0063855 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -16,7 +16,7 @@
 #define __CONFIG_LOG__H
 
 #include <log/macros.h>
-#include "configdef.h"
+#include "config_messages.h"
 
 namespace isc {
 namespace config {
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
new file mode 100644
index 0000000..660ab9a
--- /dev/null
+++ b/src/lib/config/config_messages.mes
@@ -0,0 +1,59 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::config
+
+% CONFIG_CCSESSION_MSG error in CC session message: %1
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+
+% CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+
+The most likely cause of this error is a programming error.  Please raise
+a bug report.
+
+% CONFIG_GET_FAIL error getting configuration from cfgmgr: %1
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+
+% CONFIG_JSON_PARSE JSON parse error in %1: %2
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+
+% CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+
+% CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+
+% CONFIG_OPEN_FAIL error opening %1: %2
+There was an error opening the given file. The reason for the failure
+is included in the message.
diff --git a/src/lib/config/configdef.mes b/src/lib/config/configdef.mes
deleted file mode 100644
index be39073..0000000
--- a/src/lib/config/configdef.mes
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX CONFIG_
-$NAMESPACE isc::config
-
-% FOPEN_ERR     error opening %1: %2
-There was an error opening the given file.
-
-% JSON_PARSE    JSON parse error in %1: %2
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-
-% MODULE_SPEC   module specification error in %1: %2
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
-
-% MANAGER_MOD_SPEC    module specification not accepted by cfgmgr: %1
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-
-% MANAGER_CONFIG    error getting configuration from cfgmgr: %1
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
-
-% CCSESSION_MSG error in CC session message: %1
-There was a problem with an incoming message on the command and control
-channel. The message does not appear to be a valid command, and is
-missing a required element or contains an unknown data format. This
-most likely means that another BIND10 module is sending a bad message.
-The message itself is ignored by this module.
-
-% CCSESSION_MSG_INTERNAL error handling CC session message: %1
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index e5fe049..e1a4f9d 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -24,6 +24,8 @@
 
 #include <config/tests/data_def_unittests_config.h>
 
+#include <log/logger_name.h>
+
 using namespace isc::data;
 using namespace isc::config;
 using namespace isc::cc;
@@ -632,4 +634,64 @@ TEST_F(CCSessionTest, doubleStartWithAddRemoteConfig) {
     EXPECT_THROW(mccs.addRemoteConfig(ccspecfile("spec2.spec")),
                  FakeSession::DoubleRead);
 }
+
+namespace {
+void doRelatedLoggersTest(const char* input, const char* expected) {
+    ConstElementPtr all_conf = isc::data::Element::fromJSON(input);
+    ConstElementPtr expected_conf = isc::data::Element::fromJSON(expected);
+    EXPECT_EQ(*expected_conf, *isc::config::getRelatedLoggers(all_conf));
+}
+} // end anonymous namespace
+
+TEST(LogConfigTest, relatedLoggersTest) {
+    // make sure logger configs for 'other' programs are ignored,
+    // and that * is substituted correctly
+    // The default root logger name is "bind10"
+    doRelatedLoggersTest("[{ \"name\": \"other_module\" }]",
+                         "[]");
+    doRelatedLoggersTest("[{ \"name\": \"other_module.somelib\" }]",
+                         "[]");
+    doRelatedLoggersTest("[{ \"name\": \"bind10_other\" }]",
+                         "[]");
+    doRelatedLoggersTest("[{ \"name\": \"bind10_other.somelib\" }]",
+                         "[]");
+    doRelatedLoggersTest("[ { \"name\": \"other_module\" },"
+                         "  { \"name\": \"bind10\" }]",
+                         "[ { \"name\": \"bind10\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"bind10\" }]",
+                         "[ { \"name\": \"bind10\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"bind10.somelib\" }]",
+                         "[ { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+                         "  { \"name\": \"bind10.somelib\" }]",
+                         "[ { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+                         "  { \"name\": \"bind10\" },"
+                         "  { \"name\": \"bind10.somelib\" }]",
+                         "[ { \"name\": \"bind10\" },"
+                         "  { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"*\" }]",
+                         "[ { \"name\": \"bind10\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"*.somelib\" }]",
+                         "[ { \"name\": \"bind10.somelib\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+                         "  { \"name\": \"bind10\", \"severity\": \"WARN\"}]",
+                         "[ { \"name\": \"bind10\", \"severity\": \"WARN\"} ]");
+    doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+                         "  { \"name\": \"some_module\", \"severity\": \"WARN\"}]",
+                         "[ { \"name\": \"bind10\", \"severity\": \"DEBUG\"} ]");
+
+    // make sure 'bad' things like '*foo.x' or '*lib' are ignored
+    // (cfgmgr should have already caught it in the logconfig plugin
+    // check, and is responsible for reporting the error)
+    doRelatedLoggersTest("[ { \"name\": \"*foo\" }]",
+                         "[ ]");
+    doRelatedLoggersTest("[ { \"name\": \"*foo.bar\" }]",
+                         "[ ]");
+    doRelatedLoggersTest("[ { \"name\": \"*foo\" },"
+                         "  { \"name\": \"*foo.lib\" },"
+                         "  { \"name\": \"bind10\" } ]",
+                         "[ { \"name\": \"bind10\" } ]");
+}
+
 }
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index e028186..457d5b0 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,7 +7,7 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-CLEANFILES = *.gcno *.gcda messagedef.h messagedef.cc
+CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
 
 lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
@@ -21,15 +21,15 @@ libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
 libdatasrc_la_SOURCES += zone.h
 libdatasrc_la_SOURCES += result.h
 libdatasrc_la_SOURCES += logger.h logger.cc
-nodist_libdatasrc_la_SOURCES = messagedef.h messagedef.cc
+nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
 libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 
-BUILT_SOURCES = messagedef.h messagedef.cc
-messagedef.h messagedef.cc: Makefile messagedef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/messagedef.mes
+BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 
-EXTRA_DIST = messagedef.mes
+EXTRA_DIST = datasrc_messages.mes
diff --git a/src/lib/datasrc/cache.cc b/src/lib/datasrc/cache.cc
index 8e9487d..9082a6b 100644
--- a/src/lib/datasrc/cache.cc
+++ b/src/lib/datasrc/cache.cc
@@ -100,6 +100,19 @@ public:
     /// \return \c RRsetPtr
     RRsetPtr getRRset() const { return (entry->rrset); }
 
+    /// \brief Returns name associated with cached node
+    ///
+    /// This is the name associated with the RRset if it is a positive
+    /// entry, and the associated question name if the RRSet is NULL
+    /// and this is a negative entry (together with an indication that
+    /// this is a negative entry).
+    string getNodeName() const {
+        if (getRRset()) {
+            return (getRRset()->getName().toText());
+        }
+        return (std::string("negative entry for ") + question.toText());
+    }
+
     /// \brief Returns the query response flags associated with the data.
     ///
     /// \return \c uint32_t
@@ -213,7 +226,7 @@ HotCacheImpl::HotCacheImpl(int slots, bool enabled) :
 inline void
 HotCacheImpl::insert(const CacheNodePtr node) {
     LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_INSERT).
-        arg(node->getRRset()->getName());
+        arg(node->getNodeName());
     std::map<Question, CacheNodePtr>::const_iterator iter;
     iter = map_.find(node->question);
     if (iter != map_.end()) {
@@ -253,7 +266,7 @@ HotCacheImpl::promote(CacheNodePtr node) {
 void
 HotCacheImpl::remove(ConstCacheNodePtr node) {
     LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_REMOVE).
-        arg(node->getRRset()->getName());
+        arg(node->getNodeName());
     lru_.erase(node->lru_entry_);
     map_.erase(node->question);
     --count_;
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
new file mode 100644
index 0000000..c692364
--- /dev/null
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -0,0 +1,493 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::datasrc
+
+# \brief Messages for the data source library
+
+% DATASRC_CACHE_CREATE creating the hotspot cache
+Debug information that the hotspot cache was created at startup.
+
+% DATASRC_CACHE_DESTROY destroying the hotspot cache
+Debug information. The hotspot cache is being destroyed.
+
+% DATASRC_CACHE_DISABLE disabling the cache
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+
+% DATASRC_CACHE_ENABLE enabling the cache
+The hotspot cache is enabled from now on.
+
+% DATASRC_CACHE_EXPIRED the item '%1' is expired
+Debug information. There was an attempt to look up an item in the hotspot
+cache. The item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+DATASRC_CACHE_NOT_FOUND).
+
+% DATASRC_CACHE_FOUND the item '%1' was found
+Debug information. An item was successfully looked up in the hotspot cache.
+
+% DATASRC_CACHE_FULL cache is full, dropping oldest
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by DATASRC_CACHE_REMOVE.
+
+% DATASRC_CACHE_INSERT inserting item '%1' into the cache
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+
+% DATASRC_CACHE_NOT_FOUND the item '%1' was not found
+Debug information. It was attempted to look up an item in the hotspot cache,
+but it is not there.
+
+% DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by DATASRC_CACHE_REMOVE.
+
+% DATASRC_CACHE_REMOVE removing '%1' from the cache
+Debug information. An item is being removed from the hotspot cache.
+
+% DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. The size of 0
+means no limit.
+
+% DATASRC_DO_QUERY handling query for '%1/%2'
+Debug information. We're processing some internal query for given name and
+type.
+
+% DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
+Debug information. An RRset is being added to the in-memory data source.
+
+% DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'
+Debug information. Some special marks are needed above each '*' in a wildcard
+name. They are being added now for this name.
+
+% DATASRC_MEM_ADD_ZONE adding zone '%1/%2'
+Debug information. A zone is being added into the in-memory data source.
+
+% DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+
+% DATASRC_MEM_CNAME CNAME at the domain '%1'
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+
+% DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
+This is the same problem as in DATASRC_MEM_CNAME_TO_NONEMPTY, but it happened
+the other way around: adding some other data to a CNAME.
+
+% DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+
+% DATASRC_MEM_CREATE creating zone '%1' in '%2' class
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+
+% DATASRC_MEM_DELEG_FOUND delegation found at '%1'
+Debug information. A delegation point was found above the requested record.
+
+% DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class
+Debug information. A zone from in-memory data source is being destroyed.
+
+% DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain
+and cause the search to stop.
+
+% DATASRC_MEM_DNAME_FOUND DNAME found at '%1'
+Debug information. A DNAME was found instead of the requested information.
+
+% DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
+A request was made to put DNAME and NS records into the same domain
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3. This indicates a problem with the provided data.
+
+% DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+
+% DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'
+An RRset is being inserted into in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+
+% DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+
+% DATASRC_MEM_FIND find '%1/%2'
+Debug information. A search for the requested RRset is being started.
+
+% DATASRC_MEM_FIND_ZONE looking for zone '%1'
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+
+% DATASRC_MEM_LOAD loading zone '%1' from file '%2'
+Debug information. The content of the master file is being loaded into memory.
+
+% DATASRC_MEM_NOTFOUND requested domain '%1' not found
+Debug information. The requested domain does not exist.
+
+% DATASRC_MEM_NS_ENCOUNTERED encountered a NS
+Debug information. While searching for the requested domain, an NS was
+encountered on the way (a delegation). This may cause the search to stop.
+
+% DATASRC_MEM_NXRRSET no such type '%1' at '%2'
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+
+% DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
+An attempt was made to add the domain into a zone that shouldn't have it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+
+% DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'
+Debug information. A RRset is being generated from a different RRset (most
+probably a wildcard). So it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+
+% DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+
+% DATASRC_MEM_SUCCESS query for '%1/%2' successful
+Debug information. The requested record was found.
+
+% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested
+record type).
+
+% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for doing some manipulation in an exception-safe
+manner: the new data are prepared in a different zone object and, when that
+works, the contents are swapped. The old object then contains the new data
+and the other one can be safely destroyed.
+
+% DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
+Debug information. A domain above the wildcard was reached, but there's
+something below the requested domain. Therefore the wildcard doesn't apply
+here.  This behaviour is specified by RFC 1034, section 4.3.3.
+
+% DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_META_ADD adding a data source into meta data source
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+
+% DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
+It was attempted to add a data source into a meta data source. But their
+classes do not match.
+
+% DATASRC_META_REMOVE removing data source from meta data source
+Debug information. A data source is being removed from meta data source.
+
+% DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'
+Debug information. A NSEC record covering this zone is being added.
+
+% DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+
+% DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message
+Debug information. An RRset is being added to the response message.
+
+% DATASRC_QUERY_ADD_SOA adding SOA of '%1'
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+
+% DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+
+% DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+
+% DATASRC_QUERY_CACHED data for %1/%2 found in cache
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+
+% DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'
+Debug information. While processing a query, lookup to the hotspot cache
+is being made.
+
+% DATASRC_QUERY_COPY_AUTH copying authoritative section into message
+Debug information. The whole referral information is being copied into the
+response message.
+
+% DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+
+% DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty
+There was a CNAME and it was being followed. But it contains no records,
+so there's nowhere to go. There will be no answer. This indicates a problem
+with the supplied data.
+
+% DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty
+During an attempt to synthesize CNAME from this DNAME it was discovered the
+DNAME is empty (it has no records). This indicates problem with supplied data.
+
+% DATASRC_QUERY_FAIL query failed
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+
+% DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+
+% DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
+Debug information. While processing a query, an MX record was met. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
+Debug information. While processing a query, an NS record was met. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+
+% DATASRC_QUERY_INVALID_OP invalid query operation requested
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+
+% DATASRC_QUERY_IS_AUTH auth query (%1/%2)
+Debug information. The last DO_QUERY is an auth query.
+
+% DATASRC_QUERY_IS_GLUE glue query (%1/%2)
+Debug information. The last DO_QUERY is a query for glue addresses.
+
+% DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+
+% DATASRC_QUERY_IS_REF query for referral (%1/%2)
+Debug information. The last DO_QUERY is a query for referral information.
+
+% DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)
+Debug information. The last DO_QUERY is a simple query.
+
+% DATASRC_QUERY_MISPLACED_TASK task of this type should not be here
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+
+% DATASRC_QUERY_MISSING_NS missing NS records for '%1'
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA
+The answer should have been a negative one (e.g. of nonexistence of something).
+To do so, an SOA record should be put into the authority section, but the zone
+does not have one. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+
+% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+
+% DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
+An attempt to add a NSEC record into the message failed, because the zone does
+not have any DS record. This indicates problem with the provided data.
+
+% DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
+An attempt to add a NSEC3 record into the message failed, because the zone does
+not have any DS record. This indicates problem with the provided data.
+
+% DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+
+% DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class
+Debug information. A query is now being processed.
+
+% DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
+The user wants DNSSEC and we discovered the entity doesn't exist (either
+domain or the record). But there was an error getting NSEC/NSEC3 record
+to prove the nonexistence.
+
+% DATASRC_QUERY_REF_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+
+% DATASRC_QUERY_RRSIG unable to answer RRSIG query
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+
+% DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
+Debug information. While answering a query, a DNAME was met. The DNAME itself
+will be returned, but along with it a CNAME for clients which don't understand
+DNAMEs will be synthesized.
+
+% DATASRC_QUERY_TASK_FAIL task failed with %1
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates problem with supplied data.
+
+% DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask
+This indicates a programmer error. The answer of subtask doesn't look like
+anything known.
+
+% DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+
+% DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'
+During an attempt to cover the domain by a wildcard an error happened. The
+exact kind was hopefully already reported.
+
+% DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+
+% DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
+While processing a wildcard, a referral was met. But it wasn't possible to get
+enough information for it.  The code is 1 for error, 2 for not implemented.
+
+% DATASRC_SQLITE_CLOSE closing SQLite database
+Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CREATE SQLite data source created
+Debug information. An instance of SQLite data source is being created.
+
+% DATASRC_SQLITE_DESTROY SQLite data source destroyed
+Debug information. An instance of SQLite data source is being destroyed.
+
+% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+
+% DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it
+Debug information. The last DATASRC_SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+
+% DATASRC_SQLITE_FIND looking for RRset '%1/%2'
+Debug information. The SQLite data source is looking up a resource record
+set.
+
+% DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
+Debug information. The data source is looking up the addresses for given
+domain name.
+
+% DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+
+% DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'
+Debug information. The SQLite data source is looking up an exact resource
+record.
+
+% DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+
+% DATASRC_SQLITE_FINDREC looking for record '%1/%2'
+Debug information. The SQLite data source is looking up records of given name
+and type in the database.
+
+% DATASRC_SQLITE_FINDREF looking for referral at '%1'
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+
+% DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
+The SQLite data source was trying to identify if there's a referral. But
+it contains a different class than the query was for.
+
+% DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+
+% DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+
+% DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
+The SQLite data source was asked to provide a NSEC3 record for the given zone.
+But it doesn't contain that zone.
+
+% DATASRC_SQLITE_OPEN opening SQLite database '%1'
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+
+% DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'
+Debug information. We're trying to look up the name preceding the supplied one.
+
+% DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
+The SQLite data source tried to identify the name preceding this one. But this
+one is not contained in any zone in the data source.
+
+% DATASRC_SQLITE_SETUP setting up SQLite database
+The database for the SQLite data source was found empty. It is assumed this is
+the first run and it is being initialized with the current schema. It will
+still contain no data, but it will be ready for use.
+
+% DATASRC_STATIC_BAD_CLASS static data source can handle CH only
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+
+% DATASRC_STATIC_CREATE creating the static datasource
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+
+% DATASRC_STATIC_FIND looking for '%1/%2'
+Debug information. This resource record set is being looked up in the static
+data source.
+
+% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
+This indicates a programming error. An internal task of unknown type was
+generated.
+
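
For readers following this change: each '%' entry above defines a message ID
that the message compiler turns into a constant in the generated
datasrc_messages.h (now included by logger.h, as the next hunk shows), and the
'%1', '%2', ... placeholders are filled in at the logging call site.  The
sketch below is illustrative only; the logger object name, the debug level
value and the exact namespace qualification are assumptions, not taken from
this commit.

    // Illustrative sketch, not part of this commit.
    #include <string>

    #include <datasrc/logger.h>  // brings in the generated datasrc_messages.h
    #include <log/macros.h>

    void
    logWildcardLookup(const std::string& qname) {
        // DATASRC_QUERY_WILDCARD has a single placeholder ('%1'), supplied
        // through the formatter's arg() call.  The debug level (20) and the
        // 'isc::datasrc::logger' object are assumed names for illustration.
        LOG_DEBUG(isc::datasrc::logger, 20,
                  isc::datasrc::DATASRC_QUERY_WILDCARD).arg(qname);
    }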
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index 7c2828d..ac5d50b 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -16,7 +16,7 @@
 #define __DATASRC_LOGGER_H
 
 #include <log/macros.h>
-#include <datasrc/messagedef.h>
+#include <datasrc/datasrc_messages.h>
 
 /// \file logger.h
 /// \brief Data Source library global logger
diff --git a/src/lib/datasrc/messagedef.mes b/src/lib/datasrc/messagedef.mes
deleted file mode 100644
index 0374306..0000000
--- a/src/lib/datasrc/messagedef.mes
+++ /dev/null
@@ -1,494 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX DATASRC_
-$NAMESPACE isc::datasrc
-
-# \brief Messages for the data source library
-
-% CACHE_CREATE creating the hotspot cache
-Debug information that the hotspot cache was created at startup.
-
-% CACHE_DESTROY destroying the hotspot cache
-Debug information. The hotspot cache is being destroyed.
-
-% CACHE_INSERT inserting item '%1' into the cache
-Debug information. It means a new item is being inserted into the hotspot
-cache.
-
-% CACHE_OLD_FOUND older instance of cache item found, replacing
-Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_FULL cache is full, dropping oldest
-Debug information. After inserting an item into the hotspot cache, the
-maximum number of items was exceeded, so the least recently used item will
-be dropped. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_REMOVE removing '%1' from the cache
-Debug information. An item is being removed from the hotspot cache.
-
-% CACHE_NOT_FOUND the item '%1' was not found
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-
-% CACHE_FOUND the item '%1' was found
-Debug information. An item was successfully looked up in the hotspot cache.
-
-% CACHE_EXPIRED the item '%1' is expired
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
-
-% CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
-The maximum allowed number of items of the hotspot cache is set to the given
-number. If there are too many, some of them will be dropped. The size of 0
-means no limit.
-
-% CACHE_ENABLE enabling the cache
-The hotspot cache is enabled from now on.
-
-% CACHE_DISABLE disabling the cache
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-
-% QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
-
-% QUERY_EMPTY_DNAME the DNAME on '%1' is empty
-During an attempt to synthesize CNAME from this DNAME it was discovered the
-DNAME is empty (it has no records). This indicates problem with supplied data.
-
-% QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
-Debug information. While processing a query, a NS record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
-Debug information. While processing a query, a MX record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_FOLLOW_CNAME following CNAME at '%1'
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
-
-% QUERY_EMPTY_CNAME cNAME at '%1' is empty
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
-
-% QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'
-A CNAME led to another CNAME and it led to another, and so on. After 16
-CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
-might possibly be a loop as well. Note that some of the CNAMEs might have
-been synthesized from DNAMEs. This indicates problem with supplied data.
-
-% QUERY_CHECK_CACHE checking cache for '%1/%2'
-Debug information. While processing a query, lookup to the hotspot cache
-is being made.
-
-% QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for ANY queries for consistency
-reasons.
-
-% QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for authoritative ANY queries
-for consistency reasons.
-
-% DO_QUERY handling query for '%1/%2'
-Debug information. We're processing some internal query for given name and
-type.
-
-% QUERY_NO_ZONE no zone containing '%1' in class '%2'
-Lookup of domain failed because the data have no zone that contain the
-domain. Maybe someone sent a query to the wrong server for some reason.
-
-% QUERY_CACHED data for %1/%2 found in cache
-Debug information. The requested data were found in the hotspot cache, so
-no query is sent to the real data source.
-
-% QUERY_IS_SIMPLE simple query (%1/%2)
-Debug information. The last DO_QUERY is a simple query.
-
-% QUERY_IS_AUTH auth query (%1/%2)
-Debug information. The last DO_QUERY is an auth query.
-
-% QUERY_IS_GLUE glue query (%1/%2)
-Debug information. The last DO_QUERY is query for glue addresses.
-
-% QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
-Debug information. The last DO_QUERY is query for addresses that are not
-glue.
-
-% QUERY_IS_REF query for referral (%1/%2)
-Debug information. The last DO_QUERY is query for referral information.
-
-% QUERY_SIMPLE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the simple query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_AUTH_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the authoritative query. 1 means
-some error, 2 is not implemented. The data source should have logged the
-specific error already.
-
-% QUERY_GLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the glue query. 1 means some error,
-2 is not implemented. The data source should have logged the specific error
-already.
-
-% QUERY_NOGLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the no-glue query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_REF_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the query for referral information.
-1 means some error, 2 is not implemented. The data source should have logged
-the specific error already.
-
-% QUERY_INVALID_OP invalid query operation requested
-This indicates a programmer error. The DO_QUERY was called with unknown
-operation code.
-
-% QUERY_ADD_RRSET adding RRset '%1/%2' to message
-Debug information. An RRset is being added to the response message.
-
-% QUERY_COPY_AUTH copying authoritative section into message
-Debug information. The whole referral information is being copied into the
-response message.
-
-% QUERY_DELEGATION looking for delegation on the path to '%1'
-Debug information. The software is trying to identify delegation points on the
-way down to the given domain.
-
-% QUERY_ADD_SOA adding SOA of '%1'
-Debug information. A SOA record of the given zone is being added to the
-authority section of the response message.
-
-% QUERY_ADD_NSEC adding NSEC record for '%1'
-Debug information. A NSEC record covering this zone is being added.
-
-% QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
-Debug information. A NSEC3 record for the given zone is being added to the
-response message.
-
-% QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
-An attempt to add a NSEC3 record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
-An attempt to add a NSEC record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_WILDCARD looking for a wildcard covering '%1'
-Debug information. A direct match wasn't found, so a wildcard covering the
-domain is being looked for now.
-
-% QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
-While processing a wildcard, it wasn't possible to prove nonexistence of the
-given domain or record.  The code is 1 for error and 2 for not implemented.
-
-% QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
-While processing a wildcard, a referral was met. But it wasn't possible to get
-enough information for it.  The code is 1 for error, 2 for not implemented.
-
-% QUERY_PROCESS processing query '%1/%2' in the '%3' class
-Debug information. A sure query is being processed now.
-
-% QUERY_RRSIG unable to answer RRSIG query
-The server is unable to answer a direct query for RRSIG type, but was asked
-to do so.
-
-% QUERY_MISPLACED_TASK task of this type should not be here
-This indicates a programming error. A task was found in the internal task
-queue, but this kind of task wasn't designed to be inside the queue (it should
-be handled right away, not queued).
-
-% QUERY_TASK_FAIL task failed with %1
-The query subtask failed. The reason should have been reported by the subtask
-already. The code is 1 for error, 2 for not implemented.
-
-% QUERY_MISSING_NS missing NS records for '%1'
-NS records should have been put into the authority section. However, this zone
-has none. This indicates problem with provided data.
-
-% UNEXPECTED_QUERY_STATE unexpected query state
-This indicates a programming error. An internal task of unknown type was
-generated.
-
-% QUERY_FAIL query failed
-Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
-
-% QUERY_BAD_REFERRAL bad referral to '%1'
-The domain lives in another zone. But it is not possible to generate referral
-information for it.
-
-% QUERY_WILDCARD_FAIL error processing wildcard for '%1'
-During an attempt to cover the domain by a wildcard an error happened. The
-exact kind was hopefully already reported.
-
-% QUERY_MISSING_SOA the zone '%1' has no SOA
-The answer should have been a negative one (eg. of nonexistence of something).
-To do so, a SOA record should be put into the authority section, but the zone
-does not have one. This indicates problem with provided data.
-
-% QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
-The user wants DNSSEC and we discovered the entity doesn't exist (either
-domain or the record). But there was an error getting NSEC/NSEC3 record
-to prove the nonexistence.
-
-% QUERY_UNKNOWN_RESULT unknown result of subtask
-This indicates a programmer error. The answer of subtask doesn't look like
-anything known.
-
-% META_ADD adding a data source into meta data source
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
-
-% META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
-It was attempted to add a data source into a meta data source. But their
-classes do not match.
-
-% META_REMOVE removing data source from meta data source
-Debug information. A data source is being removed from meta data source.
-
-% MEM_ADD_WILDCARD adding wildcards for '%1'
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
-
-% MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
-Someone or something tried to add a CNAME into a domain that already contains
-some other data. But the protocol forbids coexistence of CNAME with anything
-(RFC 1034, section 3.6.2). This indicates a problem with provided data.
-
-% MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
-This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
-
-% MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
-
-% MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
-Some resource types are singletons -- only one is allowed in a domain
-(for example CNAME or SOA). This indicates a problem with provided data.
-
-% MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
-It was attempted to add the domain into a zone that shouldn't have it
-(eg. the domain is not subdomain of the zone origin). This indicates a
-problem with provided data.
-
-% MEM_WILDCARD_NS nS record in wildcard domain '%1'
-The software refuses to load NS records into a wildcard domain.  It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'
-The software refuses to load DNAME records into a wildcard domain.  It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
-Debug information. An RRset is being added to the in-memory data source.
-
-% MEM_DUP_RRSET duplicate RRset '%1/%2'
-An RRset is being inserted into in-memory data source for a second time.  The
-original version must be removed first. Note that loading master files where an
-RRset is split into multiple locations is not supported yet.
-
-% MEM_DNAME_ENCOUNTERED encountered a DNAME
-Debug information. While searching for the requested domain, a DNAME was
-encountered on the way.  This may lead to redirection to a different domain and
-stop the search.
-
-% MEM_NS_ENCOUNTERED encountered a NS
-Debug information. While searching for the requested domain, a NS was
-encountered on the way (a delegation). This may lead to stop of the search.
-
-% MEM_RENAME renaming RRset from '%1' to '%2'
-Debug information. A RRset is being generated from a different RRset (most
-probably a wildcard). So it must be renamed to whatever the user asked for. In
-fact, it's impossible to rename RRsets with our libraries, so a new one is
-created and all resource records are copied over.
-
-% MEM_FIND find '%1/%2'
-Debug information. A search for the requested RRset is being started.
-
-% MEM_DNAME_FOUND DNAME found at '%1'
-Debug information. A DNAME was found instead of the requested information.
-
-% MEM_DELEG_FOUND delegation found at '%1'
-Debug information. A delegation point was found above the requested record.
-
-% MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
-Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated  as NXRRSET
-case (eg. the domain exists, but it doesn't have the requested record type).
-
-% MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
-Debug information. A domain above wildcard was reached, but there's something
-below the requested domain. Therefore the wildcard doesn't apply here.  This
-behaviour is specified by RFC 1034, section 4.3.3
-
-% MEM_NOTFOUND requested domain '%1' not found
-Debug information. The requested domain does not exist.
-
-% MEM_DOMAIN_EMPTY requested domain '%1' is empty
-Debug information. The requested domain exists in the tree of domains, but
-it is empty. Therefore it doesn't contain the requested resource type.
-
-% MEM_EXACT_DELEGATION delegation at the exact domain '%1'
-Debug information. There's a NS record at the requested domain. This means
-this zone is not authoritative for the requested domain, but a delegation
-should be followed. The requested domain is an apex of some zone.
-
-% MEM_ANY_SUCCESS ANY query for '%1' successful
-Debug information. The domain was found and an ANY type query is being answered
-by providing everything found inside the domain.
-
-% MEM_SUCCESS query for '%1/%2' successful
-Debug information. The requested record was found.
-
-% MEM_CNAME CNAME at the domain '%1'
-Debug information. The requested domain is an alias to a different domain,
-returning the CNAME instead.
-
-% MEM_NXRRSET no such type '%1' at '%2'
-Debug information. The domain exists, but it doesn't hold any record of the
-requested type.
-
-% MEM_CREATE creating zone '%1' in '%2' class
-Debug information. A representation of a zone for the in-memory data source is
-being created.
-
-% MEM_DESTROY destroying zone '%1' in '%2' class
-Debug information. A zone from in-memory data source is being destroyed.
-
-% MEM_LOAD loading zone '%1' from file '%2'
-Debug information. The content of master file is being loaded into the memory.
-
-% MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
-Debug information. The contents of two in-memory zones are being exchanged.
-This is usual practice to do some manipulation in exception-safe manner -- the
-new data are prepared in a different zone object and when it works, they are
-swapped. The old one contains the new data and the other one can be safely
-destroyed.
-
-% MEM_ADD_ZONE adding zone '%1/%2'
-Debug information. A zone is being added into the in-memory data source.
-
-% MEM_FIND_ZONE looking for zone '%1'
-Debug information. A zone object for this zone is being searched for in the
-in-memory data source.
-
-% STATIC_CREATE creating the static datasource
-Debug information. The static data source (the one holding stuff like
-version.bind) is being created.
-
-% STATIC_BAD_CLASS static data source can handle CH only
-For some reason, someone asked the static data source a query that is not in
-the CH class.
-
-% STATIC_FIND looking for '%1/%2'
-Debug information. This resource record set is being looked up in the static
-data source.
-
-% SQLITE_FINDREC looking for record '%1/%2'
-Debug information. The SQLite data source is looking up records of given name
-and type in the database.
-
-% SQLITE_ENCLOSURE looking for zone containing '%1'
-Debug information. The SQLite data source is trying to identify, which zone
-should hold this domain.
-
-% SQLITE_ENCLOSURE_NOTFOUND no zone contains it
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
-no such zone in our data.
-
-% SQLITE_PREVIOUS looking for name previous to '%1'
-Debug information. We're trying to look up name preceding the supplied one.
-
-% SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
-
-% SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
-Debug information. We're trying to look up a NSEC3 record in the SQLite data
-source.
-
-% SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
-The SQLite data source was asked to provide a NSEC3 record for given zone.
-But it doesn't contain that zone.
-
-% SQLITE_FIND looking for RRset '%1/%2'
-Debug information. The SQLite data source is looking up a resource record
-set.
-
-% SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an RRset, but the data source contains
-different class than the query was for.
-
-% SQLITE_FINDEXACT looking for exact RRset '%1/%2'
-Debug information. The SQLite data source is looking up an exact resource
-record.
-
-% SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an exact RRset, but the data source
-contains different class than the query was for.
-
-% SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
-Debug information. The data source is looking up the addresses for given
-domain name.
-
-% SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
-The SQLite data source was looking up A/AAAA addresses, but the data source
-contains different class than the query was for.
-
-% SQLITE_FINDREF looking for referral at '%1'
-Debug information. The SQLite data source is identifying if this domain is
-a referral and where it goes.
-
-% SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
-The SQLite data source was trying to identify, if there's a referral. But
-it contains different class than the query was for.
-
-% SQLITE_CREATE sQLite data source created
-Debug information. An instance of SQLite data source is being created.
-
-% SQLITE_DESTROY sQLite data source destroyed
-Debug information. An instance of SQLite data source is being destroyed.
-
-% SQLITE_SETUP setting up SQLite database
-The database for SQLite data source was found empty. It is assumed this is the
-first run and it is being initialized with current schema.  It'll still contain
-no data, but it will be ready for use.
-
-% SQLITE_OPEN opening SQLite database '%1'
-Debug information. The SQLite data source is loading an SQLite database in
-the provided file.
-
-% SQLITE_CLOSE closing SQLite database
-Debug information. The SQLite data source is closing the database file.
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index a42037b..d0f1d74 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -163,6 +163,17 @@ public:
         oss__ << stream; \
         throw type(__FILE__, __LINE__, oss__.str().c_str()); \
     } while (1)
+
+///
+/// Similar to isc_throw, but allows the exception to have one additional
+/// parameter (the stream/text goes first).
+#define isc_throw_1(type, stream, param1) \
+    do { \
+        std::ostringstream oss__; \
+        oss__ << stream; \
+        throw type(__FILE__, __LINE__, oss__.str().c_str(), param1); \
+    } while (1)
+
 }
 #endif // __EXCEPTIONS_H
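
As an illustration of the new macro (not part of this commit): isc_throw_1
behaves like isc_throw but forwards one extra constructor argument after the
formatted text, so it suits exception types whose constructors take an
additional parameter.  The QueryError class below is hypothetical; it only
mirrors the (file, line, what) constructor shape used elsewhere in this diff.

    // Hypothetical example: an exception carrying an extra integer code
    // after the usual (file, line, what) arguments.
    #include <string>
    #include <exceptions/exceptions.h>

    class QueryError : public isc::Exception {
    public:
        QueryError(const char* file, size_t line, const char* what, int code) :
            isc::Exception(file, line, what), code_(code)
        {}
        int getCode() const { return (code_); }
    private:
        int code_;
    };

    void
    failQuery(const std::string& qname, int code) {
        // The stream/text argument comes first, the extra parameter last.
        isc_throw_1(QueryError, "query for " << qname << " failed", code);
    }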
 
diff --git a/src/lib/log/logger.cc b/src/lib/log/logger.cc
index 5a35d36..d10e979 100644
--- a/src/lib/log/logger.cc
+++ b/src/lib/log/logger.cc
@@ -18,6 +18,7 @@
 #include <log/logger.h>
 #include <log/logger_impl.h>
 #include <log/logger_name.h>
+#include <log/logger_support.h>
 #include <log/message_dictionary.h>
 #include <log/message_types.h>
 
@@ -28,9 +29,14 @@ using namespace std;
 namespace isc {
 namespace log {
 
-// Initialize Logger.
+// Initialize underlying logger, but only if logging has been initialized.
 void Logger::initLoggerImpl() {
-    loggerptr_ = new LoggerImpl(name_);
+    if (isLoggingInitialized()) {
+        loggerptr_ = new LoggerImpl(name_);
+    } else {
+        isc_throw(LoggingNotInitialized, "attempt to access logging function "
+                  "before logging has been initialized");
+    }
 }
 
 // Destructor.
diff --git a/src/lib/log/logger.h b/src/lib/log/logger.h
index 378db10..96168c0 100644
--- a/src/lib/log/logger.h
+++ b/src/lib/log/logger.h
@@ -18,6 +18,7 @@
 #include <cstdlib>
 #include <string>
 
+#include <exceptions/exceptions.h>
 #include <log/logger_level.h>
 #include <log/message_types.h>
 #include <log/log_formatter.h>
@@ -73,6 +74,17 @@ namespace log {
 
 class LoggerImpl;   // Forward declaration of the implementation class
 
+/// \brief Logging Not Initialized
+///
+/// Exception thrown if an attempt is made to access a logging function
+/// before the logging system has been initialized.
+class LoggingNotInitialized : public isc::Exception {
+public:
+    LoggingNotInitialized(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what)
+    {}
+};
+
 /// \brief Logger Class
 ///
 /// This class is the main class used for logging.  Use comprises:
@@ -224,15 +236,14 @@ private:
 
     /// \brief Initialize Implementation
     ///
-    /// Returns the logger pointer.  If not yet set, the underlying
-    /// implementation class is initialized.\n
-    /// \n
-    /// The reason for this indirection is to avoid the "static initialization
-    /// fiacso", whereby we cannot rely on the order of static initializations.
-    /// The main problem is the root logger name - declared statically - which
-    /// is referenced by various loggers.  By deferring a reference to it until
-    /// after the program starts executing - by which time the root name object
-    /// will be initialized - we avoid this problem.
+    /// Returns the logger pointer.  If not yet set, the implementation class is
+    /// initialized.
+    ///
+    /// The main reason for this is to allow loggers to be declared statically
+    /// before the underlying logging system is initialized.  However, any
+    /// attempt to access a logging method on any logger before initialization -
+    /// regardless of whether it is statically or automatically declared - will
+    /// cause a "LoggingNotInitialized" exception to be thrown.
     ///
     /// \return Returns pointer to implementation
     LoggerImpl* getLoggerPtr() {
diff --git a/src/lib/log/logger_impl.cc b/src/lib/log/logger_impl.cc
index cbf2f1f..046da13 100644
--- a/src/lib/log/logger_impl.cc
+++ b/src/lib/log/logger_impl.cc
@@ -98,7 +98,7 @@ LoggerImpl::getEffectiveDebugLevel() {
 // Output a general message
 string*
 LoggerImpl::lookupMessage(const MessageID& ident) {
-    return (new string(string(ident) + ", " +
+    return (new string(string(ident) + " " +
                        MessageDictionary::globalDictionary().getText(ident)));
 }
 
diff --git a/src/lib/log/logger_manager.cc b/src/lib/log/logger_manager.cc
index 9955a83..4d56e4b 100644
--- a/src/lib/log/logger_manager.cc
+++ b/src/lib/log/logger_manager.cc
@@ -15,10 +15,11 @@
 #include <algorithm>
 #include <vector>
 
-#include <log/logger.h>
+#include <log/logger_level.h>
 #include <log/logger_manager_impl.h>
 #include <log/logger_manager.h>
 #include <log/logger_name.h>
+#include <log/logger_support.h>
 #include <log/messagedef.h>
 #include <log/message_dictionary.h>
 #include <log/message_exception.h>
@@ -110,6 +111,7 @@ LoggerManager::init(const std::string& root, isc::log::Severity severity,
     // Initialize the implementation logging.  After this point, some basic
     // logging has been set up and messages can be logged.
     LoggerManagerImpl::init(severity, dbglevel);
+    setLoggingInitialized();
 
     // Check if there were any duplicate message IDs in the default dictionary
     // and if so, log them.  Log using the logging facility logger.
diff --git a/src/lib/log/logger_support.cc b/src/lib/log/logger_support.cc
index 78e28f3..73323a0 100644
--- a/src/lib/log/logger_support.cc
+++ b/src/lib/log/logger_support.cc
@@ -29,20 +29,37 @@
 #include <iostream>
 #include <string>
 
-#include <log/logger.h>
+#include <log/logger_level.h>
 #include <log/logger_manager.h>
 #include <log/logger_support.h>
 
+using namespace std;
+
+namespace {
+
+// Flag to hold logging initialization state.
+bool logging_init_state = false;
+
+} // Anonymous namespace
+
 namespace isc {
 namespace log {
 
-using namespace std;
+// Return initialization state.
+bool
+isLoggingInitialized() {
+    return (logging_init_state);
+}
 
-// Declare a logger for the logging subsystem.  This is a sub-logger of the
-// root logger and is used in all functions in this file.
-Logger logger("log");
+// Set initialization state.  (Note: as logging can be initialized via a direct
+// call to LoggerManager::init(), this function is called from there, not from
+// the initialization functions in this file.)
+void
+setLoggingInitialized(bool state) {
+    logging_init_state = state;
+}
 
-/// Logger Run-Time Initialization
+// Logger Run-Time Initialization.
 
 void
 initLogger(const string& root, isc::log::Severity severity, int dbglevel,
@@ -50,7 +67,7 @@ initLogger(const string& root, isc::log::Severity severity, int dbglevel,
     LoggerManager::init(root, severity, dbglevel, file);
 }
 
-/// Logger Run-Time Initialization via Environment Variables
+// Logger Run-Time Initialization via Environment Variables
 void initLogger(isc::log::Severity severity, int dbglevel) {
 
     // Root logger name is defined by the environment variable B10_LOGGER_ROOT.
@@ -79,20 +96,20 @@ void initLogger(isc::log::Severity severity, int dbglevel) {
             try {
                 level = boost::lexical_cast<int>(dbg_char);
                 if (level < MIN_DEBUG_LEVEL) {
-                    std::cerr << "**ERROR** debug level of " << level
-                              << " is invalid - a value of " << MIN_DEBUG_LEVEL
-                              << " will be used\n";
+                    cerr << "**ERROR** debug level of " << level
+                         << " is invalid - a value of " << MIN_DEBUG_LEVEL
+                         << " will be used\n";
                     level = MIN_DEBUG_LEVEL;
                 } else if (level > MAX_DEBUG_LEVEL) {
-                    std::cerr << "**ERROR** debug level of " << level
-                              << " is invalid - a value of " << MAX_DEBUG_LEVEL
-                              << " will be used\n";
+                    cerr << "**ERROR** debug level of " << level
+                         << " is invalid - a value of " << MAX_DEBUG_LEVEL
+                         << " will be used\n";
                     level = MAX_DEBUG_LEVEL;
                 }
             } catch (...) {
                 // Error, but not fatal to the test
-                std::cerr << "**ERROR** Unable to translate "
-                             "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
+                cerr << "**ERROR** Unable to translate "
+                        "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
             }
             dbglevel = level;
         }
diff --git a/src/lib/log/logger_support.h b/src/lib/log/logger_support.h
index 5d574d3..4bc8acc 100644
--- a/src/lib/log/logger_support.h
+++ b/src/lib/log/logger_support.h
@@ -23,6 +23,26 @@
 namespace isc {
 namespace log {
 
+/// \brief Is logging initialized?
+///
+/// As some underlying logging implementations can behave unpredictably if they
+/// have not been initialized when a logging function is called, their
+/// initialization state is tracked.  The logger functions will check this flag
+/// and throw an exception if logging is not initialized at that point.
+///
+/// \return true if logging has been initialized, false if not
+bool isLoggingInitialized();
+
+/// \brief Set "logging initialized" flag
+///
+/// Sets the state of the "logging initialized" flag.
+///
+/// \param state State to set the flag to. (This is expected to be "true" - the
+///        default - for all code apart from specific unit tests.)
+void setLoggingInitialized(bool state = true);
+
+
+
 /// \brief Run-Time Initialization
 ///
 /// Performs run-time initialization of the logger in particular supplying:
@@ -70,7 +90,9 @@ void initLogger(const std::string& root,
 ///
 /// Any errors in the settings cause messages to be output to stderr.
 ///
-/// This function is most likely to be called from unit test programs.
+/// This function is aimed at test programs, allowing the default settings to
+/// be overridden by the tester.  It is not intended for use in production
+/// code.
 
 void initLogger(isc::log::Severity severity = isc::log::INFO,
                 int dbglevel = 0);
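
A sketch of how the functions declared above fit together (illustrative, not
part of this commit): code that may run before logging has been set up can
consult isLoggingInitialized() instead of risking the LoggingNotInitialized
exception; once initLogger()/LoggerManager::init() has run - which now calls
setLoggingInitialized() - loggers can be used normally.  MSG_OPENIN and the
info() call below simply mirror the unit test added later in this diff and
stand in for a real message; the fallback-to-stderr behaviour is an
assumption.

    // Illustrative sketch only.
    #include <iostream>
    #include <string>

    #include <log/logger.h>
    #include <log/logger_support.h>
    #include <log/messagedef.h>

    void
    reportStartupEvent(isc::log::Logger& logger, const std::string& detail) {
        if (isc::log::isLoggingInitialized()) {
            // Logging is up; MSG_OPENIN stands in for a real message ID.
            logger.info(isc::log::MSG_OPENIN);
        } else {
            // Using the logger now would throw LoggingNotInitialized, so
            // fall back to plain stderr output.
            std::cerr << "startup: " << detail << std::endl;
        }
    }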
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index 93703c1..cd2ae29 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -19,6 +19,7 @@ run_unittests_SOURCES += logger_level_impl_unittest.cc
 run_unittests_SOURCES += logger_level_unittest.cc
 run_unittests_SOURCES += logger_manager_unittest.cc
 run_unittests_SOURCES += logger_name_unittest.cc
+run_unittests_SOURCES += logger_support_unittest.cc
 run_unittests_SOURCES += logger_unittest.cc
 run_unittests_SOURCES += logger_specification_unittest.cc
 run_unittests_SOURCES += message_dictionary_unittest.cc
diff --git a/src/lib/log/tests/destination_test.sh.in b/src/lib/log/tests/destination_test.sh.in
index ff2d3fb..e02141d 100755
--- a/src/lib/log/tests/destination_test.sh.in
+++ b/src/lib/log/tests/destination_test.sh.in
@@ -37,10 +37,10 @@ passfail() {
 
 echo "1. One logger, multiple destinations:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
 .
 rm -f $destfile1 $destfile2
 ./logger_example -s error -f $destfile1 -f $destfile2
@@ -61,13 +61,13 @@ rm -f $destfile1 $destfile2
 # Output for example.alpha should have done to destfile2.
 
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
-INFO  [example.beta] MSG_READERR, error reading from message file beta: info
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+INFO  [example.beta] MSG_READERR error reading from message file beta: info
 .
 echo -n  "   - destination 1:"
 cut -d' ' -f3- $destfile1 | diff $tempfile -
@@ -75,7 +75,7 @@ passfail $?
 
 echo -n  "   - destination 2:"
 cat > $tempfile << .
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
 .
 cut -d' ' -f3- $destfile2 | diff $tempfile -
 passfail $?
diff --git a/src/lib/log/tests/local_file_test.sh.in b/src/lib/log/tests/local_file_test.sh.in
index 53b0c2f..4308f96 100755
--- a/src/lib/log/tests/local_file_test.sh.in
+++ b/src/lib/log/tests/local_file_test.sh.in
@@ -37,36 +37,35 @@ passfail() {
 # Create the local message file for testing
 
 cat > $localmes << .
-\$PREFIX MSG_
-% NOTHERE     this message is not in the global dictionary
-% READERR     replacement read error, parameters: '%1' and '%2'
-% RDLOCMES    replacement read local message file, parameter is '%1'
+% MSG_NOTHERE     this message is not in the global dictionary
+% MSG_READERR     replacement read error, parameters: '%1' and '%2'
+% MSG_RDLOCMES    replacement read local message file, parameter is '%1'
 .
 
 echo -n "1. Local message replacement:"
 cat > $tempfile << .
-WARN  [example.log] MSG_IDNOTFND, could not replace message text for 'MSG_NOTHERE': no such message
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, replacement read local message file, parameter is 'dummy/file'
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, replacement read error, parameters: 'a.txt' and 'dummy reason'
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
+WARN  [example.log] MSG_IDNOTFND could not replace message text for 'MSG_NOTHERE': no such message
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES replacement read local message file, parameter is 'dummy/file'
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR replacement read error, parameters: 'a.txt' and 'dummy reason'
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
 .
 ./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
 passfail $?
 
 echo -n "2. Report error if unable to read local message file:"
 cat > $tempfile << .
-ERROR [example.log] MSG_OPENIN, unable to open message file $localmes for input: No such file or directory
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
+ERROR [example.log] MSG_OPENIN unable to open message file $localmes for input: No such file or directory
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
 .
 rm -f $localmes
 ./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
diff --git a/src/lib/log/tests/logger_support_unittest.cc b/src/lib/log/tests/logger_support_unittest.cc
new file mode 100644
index 0000000..7e5d23a
--- /dev/null
+++ b/src/lib/log/tests/logger_support_unittest.cc
@@ -0,0 +1,72 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <log/messagedef.h>
+
+using namespace isc::log;
+
+// Check that the initialized flag can be manipulated.  This is a bit chicken-
+// and-egg: we want to reset the flag to the original value at the end
+// of the test, so use the functions to do that.  But we are trying to check
+// that these functions in fact work.
+
+TEST(LoggerSupportTest, InitializedFlag) {
+    bool current_flag = isLoggingInitialized();
+
+    // check we can flip the flag.
+    setLoggingInitialized(!current_flag);
+    EXPECT_NE(current_flag, isLoggingInitialized());
+    setLoggingInitialized(!isLoggingInitialized());
+    EXPECT_EQ(current_flag, isLoggingInitialized());
+
+    // Check we can set it to explicit values (tests that a call to the "set"
+    // function does not just flip the flag).
+    setLoggingInitialized(false);
+    EXPECT_FALSE(isLoggingInitialized());
+    setLoggingInitialized(false);
+    EXPECT_FALSE(isLoggingInitialized());
+
+    setLoggingInitialized(true);
+    EXPECT_TRUE(isLoggingInitialized());
+    setLoggingInitialized(true);
+    EXPECT_TRUE(isLoggingInitialized());
+
+    // Reset to original value
+    setLoggingInitialized(current_flag);
+}
+
+// Check that a logger will throw an exception if logging has not been
+// initialized.
+
+TEST(LoggerSupportTest, LoggingInitializationCheck) {
+
+    // Assert that logging has been initialized (it should be in main()).
+    bool current_flag = isLoggingInitialized();
+    EXPECT_TRUE(current_flag);
+
+    // Flag that it has not been initialized and declare a logger. Any logging
+    // operation should then throw.
+    setLoggingInitialized(false);
+    isc::log::Logger test_logger("test");
+
+    EXPECT_THROW(test_logger.isDebugEnabled(), LoggingNotInitialized);
+    EXPECT_THROW(test_logger.info(MSG_OPENIN), LoggingNotInitialized);
+
+    // ... and check that they work when logging is initialized.
+    setLoggingInitialized(true);
+    EXPECT_NO_THROW(test_logger.isDebugEnabled());
+    EXPECT_NO_THROW(test_logger.info(MSG_OPENIN));
+}
diff --git a/src/lib/log/tests/severity_test.sh.in b/src/lib/log/tests/severity_test.sh.in
index 6f4d27a..0a304e0 100755
--- a/src/lib/log/tests/severity_test.sh.in
+++ b/src/lib/log/tests/severity_test.sh.in
@@ -35,44 +35,44 @@ passfail() {
 
 echo -n "1. runInitTest default parameters:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
-INFO  [example.alpha] MSG_OPENIN, unable to open message file example.msg for input: dummy reason
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
-INFO  [example.beta] MSG_READERR, error reading from message file beta: info
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+INFO  [example.alpha] MSG_OPENIN unable to open message file example.msg for input: dummy reason
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+INFO  [example.beta] MSG_READERR error reading from message file beta: info
 .
 ./logger_example -c stdout | cut -d' ' -f3- | diff $tempfile -
 passfail $?
 
 echo -n "2. Severity filter:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
 .
 ./logger_example -c stdout -s error | cut -d' ' -f3- | diff $tempfile -
 passfail $?
 
 echo -n "3. Debug level:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
-INFO  [example.alpha] MSG_OPENIN, unable to open message file example.msg for input: dummy reason
-DEBUG [example] MSG_RDLOCMES, reading local message file example/0
-DEBUG [example] MSG_RDLOCMES, reading local message file example/24
-DEBUG [example] MSG_RDLOCMES, reading local message file example/25
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
-INFO  [example.beta] MSG_READERR, error reading from message file beta: info
-DEBUG [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta/25
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+INFO  [example.alpha] MSG_OPENIN unable to open message file example.msg for input: dummy reason
+DEBUG [example] MSG_RDLOCMES reading local message file example/0
+DEBUG [example] MSG_RDLOCMES reading local message file example/24
+DEBUG [example] MSG_RDLOCMES reading local message file example/25
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+INFO  [example.beta] MSG_READERR error reading from message file beta: info
+DEBUG [example.beta] MSG_BADSEVERITY unrecognized log severity: beta/25
 .
 ./logger_example -c stdout -s debug -d 25 | cut -d' ' -f3- | diff $tempfile -
 passfail $?
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index dc19758..4e49501 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -6,6 +6,13 @@ EXTRA_DIST = $(PYTESTS)
 EXTRA_DIST += sendcmd.py
 EXTRA_DIST += test_session.py
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -15,6 +22,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	BIND10_TEST_SOCKET_FILE=$(builddir)/test_socket.sock \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 84809f1..0869160 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -39,6 +39,10 @@
 from isc.cc import Session
 from isc.config.config_data import ConfigData, MultiConfigData, BIND10_CONFIG_DATA_VERSION
 import isc
+from isc.util.file import path_search
+import bind10_config
+from isc.log import log_config_update
+import json
 
 class ModuleCCSessionError(Exception): pass
 
@@ -116,6 +120,18 @@ def create_command(command_name, params = None):
     msg = { 'command': cmd }
     return msg
 
+def default_logconfig_handler(new_config, config_data):
+    errors = []
+
+    if config_data.get_module_spec().validate_config(False, new_config, errors):
+        isc.log.log_config_update(json.dumps(new_config),
+            json.dumps(config_data.get_module_spec().get_full_spec()))
+    else:
+        # no logging here yet, TODO: log these errors
+        print("Error in logging configuration, ignoring config update: ")
+        for err in errors:
+            print(err)
+
 class ModuleCCSession(ConfigData):
     """This class maintains a connection to the command channel, as
        well as configuration options for modules. The module provides
@@ -126,7 +142,7 @@ class ModuleCCSession(ConfigData):
        callbacks are called when 'check_command' is called on the
        ModuleCCSession"""
        
-    def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
+    def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None, handle_logging_config = False):
         """Initialize a ModuleCCSession. This does *NOT* send the
            specification and request the configuration yet. Use start()
            for that once the ModuleCCSession has been initialized.
@@ -149,6 +165,11 @@ class ModuleCCSession(ConfigData):
         self._session.group_subscribe(self._module_name, "*")
 
         self._remote_module_configs = {}
+        self._remote_module_callbacks = {}
+
+        if handle_logging_config:
+            self.add_remote_config(path_search('logging.spec', bind10_config.PLUGIN_PATHS),
+                                   default_logconfig_handler)
 
     def __del__(self):
         # If the CC Session obejct has been closed, it returns
@@ -218,6 +239,9 @@ class ModuleCCSession(ConfigData):
                             newc = self._remote_module_configs[module_name].get_local_config()
                             isc.cc.data.merge(newc, new_config)
                             self._remote_module_configs[module_name].set_local_config(newc)
+                            if self._remote_module_callbacks[module_name] != None:
+                                self._remote_module_callbacks[module_name](new_config,
+                                                                           self._remote_module_configs[module_name])
                         # For other modules, we're not supposed to answer
                         return
 
@@ -260,7 +284,7 @@ class ModuleCCSession(ConfigData):
            and return an answer created with create_answer()"""
         self._command_handler = command_handler
 
-    def add_remote_config(self, spec_file_name):
+    def add_remote_config(self, spec_file_name, config_update_callback = None):
         """Gives access to the configuration of a different module.
            These remote module options can at this moment only be
            accessed through get_remote_config_value(). This function
@@ -289,9 +313,12 @@ class ModuleCCSession(ConfigData):
             if rcode == 0:
                 if value != None and module_spec.validate_config(False, value):
                     module_cfg.set_local_config(value);
+                    if config_update_callback is not None:
+                        config_update_callback(value, module_cfg)
 
         # all done, add it
         self._remote_module_configs[module_name] = module_cfg
+        self._remote_module_callbacks[module_name] = config_update_callback
         return module_name
         
     def remove_remote_config(self, module_name):
@@ -299,6 +326,7 @@ class ModuleCCSession(ConfigData):
         if module_name in self._remote_module_configs:
             self._session.group_unsubscribe(module_name)
             del self._remote_module_configs[module_name]
+            del self._remote_module_callbacks[module_name]
 
     def get_remote_config_value(self, module_name, identifier):
         """Returns the current setting for the given identifier at the
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 60da781..b5f5501 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -4,6 +4,13 @@ PYTESTS += module_spec_test.py
 EXTRA_DIST = $(PYTESTS)
 EXTRA_DIST += unittest_fakesession.py
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -13,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
 	CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
 	CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index a0cafd6..830cbd7 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -22,6 +22,7 @@ import os
 from isc.config.ccsession import *
 from isc.config.config_data import BIND10_CONFIG_DATA_VERSION
 from unittest_fakesession import FakeModuleCCSession, WouldBlockForever
+import bind10_config
 
 class TestHelperFunctions(unittest.TestCase):
     def test_parse_answer(self):
@@ -604,7 +605,43 @@ class TestModuleCCSession(unittest.TestCase):
         self.assertEqual(len(fake_session.message_queue), 1)
         mccs.check_command()
         self.assertEqual(len(fake_session.message_queue), 0)
-        
+
+    def test_logconfig_handler(self):
+        # test whether default_logconfig_handler reacts nicely to
+        # bad data. We assume the actual logger output is tested
+        # elsewhere
+        self.assertRaises(TypeError, default_logconfig_handler)
+        self.assertRaises(TypeError, default_logconfig_handler, 1)
+
+        spec = isc.config.module_spec_from_file(
+            path_search('logging.spec', bind10_config.PLUGIN_PATHS))
+        config_data = ConfigData(spec)
+
+        self.assertRaises(TypeError, default_logconfig_handler, 1, config_data)
+
+        default_logconfig_handler({}, config_data)
+
+        # Wrong data should not raise, but simply not be accepted
+        # This would log a lot of errors, so we may want to suppress that later
+        default_logconfig_handler({ "bad_data": "indeed" }, config_data)
+        default_logconfig_handler({ "bad_data": 1}, config_data)
+        default_logconfig_handler({ "bad_data": 1123 }, config_data)
+        default_logconfig_handler({ "bad_data": True }, config_data)
+        default_logconfig_handler({ "bad_data": False }, config_data)
+        default_logconfig_handler({ "bad_data": 1.1 }, config_data)
+        default_logconfig_handler({ "bad_data": [] }, config_data)
+        default_logconfig_handler({ "bad_data": [[],[],[[1, 3, False, "foo" ]]] },
+                                  config_data)
+        default_logconfig_handler({ "bad_data": [ 1, 2, { "b": { "c": "d" } } ] },
+                                  config_data)
+
+        # Try a correct config
+        log_conf = {"loggers":
+                       [{"name": "b10-xfrout", "output_options":
+                           [{"output": "/tmp/bind10.log",
+                                       "destination": "file",
+                                       "flush": True}]}]}
+        default_logconfig_handler(log_conf, config_data)
 
 class fakeData:
     def decode(self):
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index a6c5c58..6f6d157 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -6,6 +6,13 @@ EXTRA_DIST += testdata/brokendb.sqlite3
 EXTRA_DIST += testdata/example.com.sqlite3
 CLEANFILES = $(abs_builddir)/example.com.out.sqlite3
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -15,6 +22,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
 	TESTDATA_PATH=$(abs_srcdir)/testdata \
 	TESTDATA_WRITE_PATH=$(abs_builddir) \
diff --git a/src/lib/python/isc/log/Makefile.am b/src/lib/python/isc/log/Makefile.am
index 26735e7..b228caf 100644
--- a/src/lib/python/isc/log/Makefile.am
+++ b/src/lib/python/isc/log/Makefile.am
@@ -15,6 +15,9 @@ log_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 log_la_LDFLAGS = $(PYTHON_LDFLAGS)
 log_la_LDFLAGS += -module
 log_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
+log_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+log_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
+log_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 log_la_LIBADD += $(PYTHON_LIB)
 
 # This is not installed, it helps locate the module during tests
diff --git a/src/lib/python/isc/log/__init__.py b/src/lib/python/isc/log/__init__.py
index ad77dff..641cf79 100644
--- a/src/lib/python/isc/log/__init__.py
+++ b/src/lib/python/isc/log/__init__.py
@@ -23,7 +23,11 @@
 # Should we look there? Or define something in bind10_config?
 
 import os
-cwd = os.getcwd()
-pos = cwd.rfind('/src/')
-import sys; sys.path.insert(0, cwd[:pos] + '/src/lib/python/isc/log/.libs')
+import sys
+
+for base in sys.path[:]:
+    loglibdir = os.path.join(base, 'isc/log/.libs')
+    if os.path.exists(loglibdir):
+        sys.path.insert(0, loglibdir)
+
 from log import *
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index b29c005..484151f 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -22,6 +22,8 @@
 #include <log/logger_manager.h>
 #include <log/logger.h>
 
+#include <config/ccsession.h>
+
 #include <string>
 #include <boost/bind.hpp>
 
@@ -39,7 +41,12 @@ using boost::bind;
 // to solve this issue.
 namespace clang_unnamed_namespace_workaround {
     // To propagate python exceptions through our code
-    class InternalError {};
+    // This exception is used to signal to the calling function that a
+    // proper Python Exception has already been set, and the caller
+    // should now return NULL.
+    // Since it is only used internally, and should not pass any
+    // information itself, it is not derived from std::exception
+    class InternalError : public std::exception {};
 }
 using namespace clang_unnamed_namespace_workaround;
 
@@ -177,6 +184,47 @@ init(PyObject*, PyObject* args) {
     Py_RETURN_NONE;
 }
 
+PyObject*
+logConfigUpdate(PyObject*, PyObject* args) {
+    // we have no wrappers for ElementPtr and ConfigData,
+    // So we expect JSON strings and convert them.
+    // The new_config object is assumed to have been validated.
+
+    const char* new_config_json;
+    const char* mod_spec_json;
+    if (!PyArg_ParseTuple(args, "ss",
+                          &new_config_json, &mod_spec_json)) {
+        return (NULL);
+    }
+
+    try {
+        isc::data::ConstElementPtr new_config =
+            isc::data::Element::fromJSON(new_config_json);
+        isc::data::ConstElementPtr mod_spec_e =
+            isc::data::Element::fromJSON(mod_spec_json);
+        isc::config::ModuleSpec mod_spec(mod_spec_e);
+        isc::config::ConfigData config_data(mod_spec);
+        isc::config::default_logconfig_handler("logging", new_config,
+                                               config_data);
+
+        Py_RETURN_NONE;
+    } catch (const isc::data::JSONError& je) {
+        std::string error_msg = std::string("JSON format error: ") + je.what();
+        PyErr_SetString(PyExc_TypeError, error_msg.c_str());
+    } catch (const isc::data::TypeError& de) {
+        PyErr_SetString(PyExc_TypeError, "argument 1 of log_config_update "
+                                         "is not a map of config data");
+    } catch (const isc::config::ModuleSpecError& mse) {
+        PyErr_SetString(PyExc_TypeError, "argument 2 of log_config_update "
+                                         "is not a correct module specification");
+    } catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+    } catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+    }
+    return (NULL);
+}
+
 PyMethodDef methods[] = {
     {"set_test_dictionary", setTestDictionary, METH_VARARGS,
         "Set or unset testing mode for message dictionary. In testing, "
@@ -198,6 +246,19 @@ PyMethodDef methods[] = {
         "logging severity (one of 'DEBUG', 'INFO', 'WARN', 'ERROR' or "
         "'FATAL'), a debug level (integer in the range 0-99) and a file name "
         "of a dictionary with message text translations."},
+    {"log_config_update", logConfigUpdate, METH_VARARGS,
+        "Update logger settings. This method is automatically used when "
+        "ModuleCCSession is initialized with handle_logging_config set "
+        "to True. When called, the first argument is the new logging "
+        "configuration (in JSON format). The second argument is "
+        "the raw specification (as returned from "
+        "ConfigData.get_module_spec().get_full_spec(), and converted to "
+        "JSON format).\n"
+        "Raises a TypeError if either argument is not a (correct) JSON "
+        "string, or if the spec is not a correct spec.\n"
+        "If this call succeeds, the global logger settings have "
+        "been updated."
+    },
     {NULL, NULL, 0, NULL}
 };
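
For reference, a short sketch of calling the new binding directly (mirroring the log_test.py test added further down; not part of this file). Both arguments are JSON strings, and a TypeError is raised for malformed input.

import json
import isc.log
import isc.config
from isc.config.ccsession import path_search
import bind10_config

# The logging spec shipped as a cfgmgr plugin, serialized to JSON.
log_spec = json.dumps(
    isc.config.module_spec_from_file(
        path_search('logging.spec', bind10_config.PLUGIN_PATHS)).get_full_spec())

# A logger configuration for b10-xfrout, also as a JSON string.
log_conf = json.dumps({"loggers":
                          [{"name": "b10-xfrout",
                            "output_options": [{"output": "/tmp/bind10.log",
                                                "destination": "file",
                                                "flush": True}]}]})

# On success the global logger settings are updated in place.
isc.log.log_config_update(log_conf, log_spec)
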
 
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 0eacbb1..6bb67de 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS) log_console.py.in console.out check_output.sh
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -23,5 +23,6 @@ endif
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/log/tests/console.out b/src/lib/python/isc/log/tests/console.out
index d3c6770..fbb1bb9 100644
--- a/src/lib/python/isc/log/tests/console.out
+++ b/src/lib/python/isc/log/tests/console.out
@@ -1,4 +1,4 @@
-INFO  [test.output] MSG_ID, Message with list [1, 2, 3, 4]
-WARN  [test.output] DIFFERENT, Different message
-FATAL [test.output] MSG_ID, Message with 2 1
-DEBUG [test.output] MSG_ID, Message with 3 2
+INFO  [test.output] MSG_ID Message with list [1, 2, 3, 4]
+WARN  [test.output] DIFFERENT Different message
+FATAL [test.output] MSG_ID Message with 2 1
+DEBUG [test.output] MSG_ID Message with 3 2
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index a463d59..4292b6c 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -16,6 +16,9 @@
 # This tests it can be loaded, nothing more yet
 import isc.log
 import unittest
+import json
+import bind10_config
+from isc.config.ccsession import path_search
 
 class LogDict(unittest.TestCase):
     def setUp(self):
@@ -52,6 +55,33 @@ class Manager(unittest.TestCase):
         # ignore errors like missing file?
         isc.log.init("root", "INFO", 0, "/no/such/file");
 
+    def test_log_config_update(self):
+        log_spec = json.dumps(isc.config.module_spec_from_file(path_search('logging.spec', bind10_config.PLUGIN_PATHS)).get_full_spec())
+
+        self.assertRaises(TypeError, isc.log.log_config_update)
+        self.assertRaises(TypeError, isc.log.log_config_update, 1)
+        self.assertRaises(TypeError, isc.log.log_config_update, 1, 1)
+        self.assertRaises(TypeError, isc.log.log_config_update, 1, 1, 1)
+
+        self.assertRaises(TypeError, isc.log.log_config_update, 1, log_spec)
+        self.assertRaises(TypeError, isc.log.log_config_update, [], log_spec)
+        self.assertRaises(TypeError, isc.log.log_config_update, "foo", log_spec)
+        self.assertRaises(TypeError, isc.log.log_config_update, "{ '", log_spec)
+
+        # empty should pass
+        isc.log.log_config_update("{}", log_spec)
+
+        # bad spec
+        self.assertRaises(TypeError, isc.log.log_config_update, "{}", json.dumps({"foo": "bar"}))
+
+        # Try a correct one
+        log_conf = json.dumps({"loggers":
+                                [{"name": "b10-xfrout", "output_options":
+                                    [{"output": "/tmp/bind10.log",
+                                       "destination": "file",
+                                       "flush": True}]}]})
+        isc.log.log_config_update(log_conf, log_spec)
+
 class Logger(unittest.TestCase):
     def tearDown(self):
         isc.log.reset()
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 73528d2..3a04f17 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = addr_test.py parse_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index a83ff86..1427d93 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index 0ce96de..c3d35c2 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = process_test.py socketserver_mixin_test.py file_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/tools/system_messages.py b/tools/system_messages.py
new file mode 100644
index 0000000..6cf3ce9
--- /dev/null
+++ b/tools/system_messages.py
@@ -0,0 +1,413 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Produce System Messages Manual
+#
+# This tool reads all the .mes files in the directory tree whose root is given
+# on the command line and interprets them as BIND 10 message files.  It pulls
+# all the messages and description out, sorts them by message ID, and writes
+# them out as a single (formatted) file.
+#
+# Invocation:
+# The code is invoked using the command line:
+#
+# python system_messages.py [-o <output-file>] <top-source-directory>
+#
+# If no output file is specified, output is written to stdout.
+
+import re
+import os
+import sys
+from optparse import OptionParser
+
+# Main dictionary holding all the messages.  The messages are accumulated here
+# before being printed in alphabetical order.
+dictionary = {}
+
+# The structure of the output page is:
+#
+#        header
+#           message
+#        separator
+#           message
+#        separator
+#          :
+#        separator
+#           message
+#        trailer
+#
+# (Indentation is not relevant - it has only been added to the above
+# illustration to make the structure clearer.)  The text of these sections is:
+
+# Header - this is output before anything else.
+SEC_HEADER="""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash  "&#x2014;" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<book>
+  <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+  <bookinfo>
+    <title>BIND 10 Messages Manual</title>
+
+    <copyright>
+      <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+    </copyright>
+
+    <abstract>
+      <para>BIND 10 is a Domain Name System (DNS) suite managed by
+	  Internet Systems Consortium (ISC). It includes DNS libraries
+	  and modular components for controlling authoritative and
+	  recursive DNS servers.
+      </para>
+      <para>
+        This is the messages manual for BIND 10 version &__VERSION__;.
+	    The most up-to-date version of this document, along with
+	    other documents for BIND 10, can be found at
+        <ulink url="http://bind10.isc.org/docs"/>.
+      </para>
+    </abstract>
+
+    <releaseinfo>This is the messages manual for BIND 10 version
+        &__VERSION__;.</releaseinfo>
+  </bookinfo>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+      This document lists each message that can be logged by the
+      programs in the BIND 10 package.  Each entry in this manual
+      is of the form:
+      <screen>IDENTIFICATION message-text</screen>
+      ... where "IDENTIFICATION" is the message identification included
+      in each message logged and "message-text" is the accompanying
+      message text.  The "message-text" may include placeholders of the
+      form "%1", "%2" etc.; these parameters are replaced by relevant
+      values when the message is logged.
+    </para>
+    <para>
+      Each entry is also accompanied by a description giving more
+      information about the circumstances that result in the message
+      being logged.
+    </para>
+    <para>
+      For information on configuring and using BIND 10 logging,
+      refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+    </para>
+  </chapter>
+
+  <chapter id="messages">
+    <title>BIND 10 Messages</title>
+    <para>
+      <variablelist>
+"""
+
+# This is output once for each message.  The string contains substitution
+# tokens: $I is replaced by the message identification, $T by the message text,
+# and $D by the message description.
+SEC_MESSAGE = """<varlistentry id="$I">
+<term>$I $T</term>
+<listitem><para>
+$D
+</para></listitem>
+</varlistentry>"""
+
+# A description may contain blank lines intended to separate paragraphs.  If so,
+# each blank line is replaced by the following.
+SEC_BLANK = "</para><para>"
+
+# The separator is copied to the output verbatim after each message except
+# the last.
+SEC_SEPARATOR = ""
+
+# The trailer is copied to the output verbatim after the last message.
+SEC_TRAILER = """      </variablelist>
+    </para>
+  </chapter>
+</book>"""
+
+
+def reportError(filename, what):
+    """Report an error and exit"""
+    print("*** ERROR in ", filename, file=sys.stderr)
+    print("*** REASON: ", what, file=sys.stderr)
+    print("*** System message generator terminating", file=sys.stderr)
+    sys.exit(1)
+
+
+
+def replaceTag(string):
+    """Replaces the '<' and '>' in text about to be inserted into the template
+       sections above with &lt; and &gt; to avoid problems with message text
+       being interpreted as XML text.
+    """
+    string1 = string.replace("<", "&lt;")
+    string2 = string1.replace(">", "&gt;")
+    return string2
+
+
+
+def replaceBlankLines(lines):
+    """Replaces blank lines in an array with the contents of the 'blank'
+       section.
+    """
+    result = []
+    for l in lines:
+        if len(l) == 0:
+            result.append(SEC_BLANK)
+        else:
+            result.append(l)
+
+    return result
+
+
+
+# Printing functions
+def printHeader():
+    print(SEC_HEADER)
+
+def printSeparator():
+    print(SEC_SEPARATOR)
+
+def printMessage(msgid):
+    # In the message ID, replace "<" and ">" with XML-safe versions and
+    # substitute into the data.
+    m1 = SEC_MESSAGE.replace("$I", replaceTag(msgid))
+
+    # Do the same for the message text.
+    m2 = m1.replace("$T", replaceTag(dictionary[msgid]['text']))
+
+    # Do the same for the description then replace blank lines with the
+    # specified separator.  (We do this in that order to avoid replacing
+    # the "<" and ">" in the XML tags in the separator.)
+    desc1 = [replaceTag(l) for l in dictionary[msgid]['description']]
+    desc2 = replaceBlankLines(desc1)
+
+    # Join the lines together to form a single string and insert into
+    # current text.
+    m3 = m2.replace("$D", "\n".join(desc2))
+
+    print(m3)
+
+def printTrailer():
+    print(SEC_TRAILER)
+
+
+
+def removeEmptyLeadingTrailing(lines):
+    """Removes leading and trailing empty lines.
+
+       A list of strings is passed as argument, some of which may be empty.
+       This function removes from the start and end of the list a contiguous
+       sequence of empty lines and returns the result.  Embedded sequences of
+       empty lines are not touched.
+
+       Parameters:
+       lines List of strings to be modified.
+
+       Return:
+       Input list of strings with leading/trailing blank line sequences
+       removed.
+    """
+
+    retlines = []
+
+    # Dispose of degenerate case of empty array
+    if len(lines) == 0:
+        return retlines
+
+    # Search for first non-blank line
+    start = 0
+    while start < len(lines):
+        if len(lines[start]) > 0:
+            break
+        start = start + 1
+
+    # Handle case when entire list is empty
+    if start >= len(lines):
+        return retlines
+
+    # Search for last non-blank line
+    finish = len(lines) - 1
+    while finish >= 0:
+        if len(lines[finish]) > 0:
+            break
+        finish = finish - 1
+
+    retlines = lines[start:finish + 1]
+    return retlines
+
+
+
+def addToDictionary(msgid, msgtext, desc, filename):
+    """Add the current message ID and associated information to the global
+       dictionary.  If a message with that ID already exists, loop appending
+       suffixes of the form "(n)" to it until one is found that doesn't.
+
+       Parameters:
+       msgid        Message ID
+       msgtext      Message text
+       desc         Message description
+       filename     File from which the message came.  Currently this is
+                    not used, but a future enhancement may wish to include the
+                    name of the message file in the messages manual.
+    """
+
+    # If the ID is in the dictionary, append a "(n)" to the name - this will
+    # flag that there are multiple instances.  (However, this is an error -
+    # each ID should be unique in BIND-10.)
+    if msgid in dictionary:
+        i = 1
+        while msgid + " (" + str(i) + ")" in dictionary:
+            i = i + 1
+        msgid = msgid + " (" + str(i) + ")"
+
+    # Remove leading and trailing blank lines in the description, then
+    # add everything into a subdictionary which is then added to the main
+    # one.
+    details = {}
+    details['text'] = msgtext
+    details['description'] = removeEmptyLeadingTrailing(desc)
+    details['filename'] = filename
+    dictionary[msgid] = details
+
+
+
+def processFileContent(filename, lines):
+    """Processes file content.  Messages and descriptions are identified and
+       added to a dictionary (keyed by message ID).  If the key already exists,
+       a numeric suffix is added to it.
+
+       Parameters:
+       filename     Name of the message file being processed
+       lines        Lines read from the file
+    """
+
+    prefix = ""         # Last prefix encountered
+    msgid = ""          # Last message ID encountered
+    msgtext = ""        # Text of the message
+    description = []    # Description
+
+    for l in lines:
+        if l.startswith("$"):
+            # Starts with "$".  Ignore anything other than $PREFIX
+            words = re.split("\s+", l)
+            if words[0].upper() == "$PREFIX":
+                if len(words) == 1:
+                    prefix = ""
+                else:
+                    prefix = words[1]
+
+        elif l.startswith("%"):
+            # Start of a message.  Add the message we were processing to the
+            # dictionary and clear everything apart from the file name.
+            if msgid != "":
+                addToDictionary(msgid, msgtext, description, filename)
+
+            msgid = ""
+            msgtext = ""
+            description = []
+
+            # Start of a message
+            l = l[1:].strip()       # Remove "%" and trim leading spaces
+            if len(l) == 0:
+                reportError(filename, "Line with single % found")
+                continue
+
+            # Split into words.  The first word is the message ID
+            words = re.split("\s+", l)
+            msgid = (prefix + words[0]).upper()
+            msgtext = l[len(words[0]):].strip()
+
+        else:
+            # Part of a description, so add to the current description array
+            description.append(l)
+
+    # All done, add the last message to the global dictionary.
+    if msgid != "":
+        addToDictionary(msgid, msgtext, description, filename)
+
+
+
+def processFile(filename):
+    """Processes a file by reading it in and stripping out all comments and
+       and directives.  Leading and trailing blank lines in the file are removed
+       and the remainder passed for message processing.
+
+       Parameters:
+       filename     Name of the message file to process
+    """
+    lines = open(filename).readlines()
+
+    # Trim leading and trailing spaces from each line, and remove comments.
+    lines = [l.strip() for l in lines]
+    lines = [l for l in lines if not l.startswith("#")]
+
+    # Remove leading/trailing empty line sequences from the result
+    lines = removeEmptyLeadingTrailing(lines)
+
+    # Interpret content
+    processFileContent(filename, lines)
+
+
+
+def processAllFiles(root):
+    """Iterates through all files in the tree starting at the given root and
+       calls processFile for all .mes files found.
+
+       Parameters:
+       root     Directory that is the root of the BIND-10 source tree
+    """
+    for (path, dirs, files) in os.walk(root):
+
+        # Identify message files
+        mes_files = [f for f in files if f.endswith(".mes")]
+
+        # ... and process each file in the list
+        for m in mes_files:
+            processFile(path + os.sep + m)
+
+
+# Main program
+if __name__ == "__main__":
+    parser = OptionParser(usage="Usage: %prog [--help | options] root")
+    parser.add_option("-o", "--output", dest="output", default=None,
+                      metavar="FILE", 
+                      help="output file name (default to stdout)")
+    (options, args) = parser.parse_args()
+
+    if len(args) == 0:
+        parser.error("Must supply directory at which to begin search")
+    elif len(args) > 1:
+        parser.error("Only a single root directory can be given")
+
+    # Redirect output if specified (errors are written to stderr)
+    if options.output is not None:
+        sys.stdout = open(options.output, 'w')
+
+    # Read the files and load the data
+    processAllFiles(args[0])
+
+    # Now just print out everything we've read (in alphabetical order).
+    count = 1
+    printHeader()
+    for msgid in sorted(dictionary):
+        if count > 1:
+            printSeparator()
+        count = count + 1
+        printMessage(msgid)
+    printTrailer()
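
As an illustration of the message-file format the script expects, here is a sketch (not part of the commit; the message-file fragment and names are made up) that feeds a small fragment straight to processFileContent(), assuming the tools directory is on the Python path:

import system_messages   # tools/system_messages.py, assumed importable

# A tiny message-file fragment: a $PREFIX directive, one "%" message line
# and a two-line description, already stripped of comments.
lines = ["$PREFIX EXAMPLE_",
         "% STARTED the example module has started",
         "This informational message is logged when the example",
         "module completes its startup."]

system_messages.processFileContent("example_messages.mes", lines)

# The message is keyed by its prefixed, upper-cased ID.
entry = system_messages.dictionary["EXAMPLE_STARTED"]
print(entry['text'])          # "the example module has started"
print(entry['description'])   # the two description lines

# printMessage() renders this entry as a DocBook <varlistentry>.
system_messages.printMessage("EXAMPLE_STARTED")
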




More information about the bind10-changes mailing list