BIND 10 trac1012, updated. 502100d7b9cd9d2300e78826a3bddd024ef38a74 [trac1012] Merge branch 'master' into trac1012

BIND 10 source code commits bind10-changes at lists.isc.org
Mon Jun 27 12:01:02 UTC 2011


The branch, trac1012 has been updated
       via  502100d7b9cd9d2300e78826a3bddd024ef38a74 (commit)
       via  b477df5d4dbce5b72ebd183b83555f62aa3fcec5 (commit)
       via  5d5173ef0cc48d206464b39f696d03bae9daecea (commit)
       via  7cc074aacd5159778111fa4cbdbe1c89e6a4e51b (commit)
       via  dc087934c1a1946cfdcf63b49a70aa0fefe6b282 (commit)
       via  feed2b3537a4e57e4cb55232242c6622d1fcc654 (commit)
       via  fb032e397153a63e4f1bd3b9b7fc1a89c01e7d6f (commit)
       via  6bc6c57d5761ccd2ef65291e81bbfd995b4758a9 (commit)
       via  0f1b7a45520517a40b7b85d57d461e20e81b7aa9 (commit)
       via  885a4ecf9c87b8e3a028b6488b0e6b853365edc8 (commit)
       via  8d5a5b95c85af1f15654fe164f306fe21065ea73 (commit)
       via  77367a5d67709b65afd8689159e5192416326cb7 (commit)
       via  0f4c693c3399bd9ecf2d2a5682fda8ed1eb8158f (commit)
       via  877e89713ad2398b6637b843a22c3b12607fe5bb (commit)
       via  33e08ca107c127d5c158882e7f2e86770a48c572 (commit)
       via  32fb3ad97a7ccc65ef391b84c8f488d4ea71e963 (commit)
       via  04e7fe3f480462d288c17bd121a368b74292cfd3 (commit)
       via  354fcf46bf93f1e2e317043f2998a8b17f22fe04 (commit)
       via  21acce853a4269f0db76dc2768bb7c5107b1b7d4 (commit)
       via  c021505a1a0d6ecb15a8fd1592b94baff6d115f4 (commit)
       via  02aa9813c1f6829bb9089400c5397f3faba7d9e0 (commit)
       via  3017593b63f34c4bc69494be8c80327eaad5d922 (commit)
       via  62bc6cce6fe7343c4ef06c7e690939fd0aa20148 (commit)
       via  c58fa9e4c5aa486bb270681a45a4f0f7e04b4139 (commit)
       via  89324744df3f73de1beaefb9420aeab5f9ff7824 (commit)
       via  f9070aee950581a47c0916cb1f3b48cd4bfcb7f4 (commit)
       via  ea15d26afc9ced4a11aea6733ea3df0969c5618b (commit)
       via  f685e5c06c382180eb1775bce714ea60154b08f2 (commit)
       via  5a19ee14367d9bb796c8e43c034ee9f327052c86 (commit)
       via  f92d30bb55decf9ed4d7cdf10231dfe2913ca11a (commit)
       via  461a9d0a1e896e0a1b676c6873f74404d5ab95c1 (commit)
       via  bc81810505f7263aedb8654d139510058c251626 (commit)
       via  b57c51e6ddfc6770d5c66eab5aeb1a5238e5a7ea (commit)
       via  ddb1b2241fc03a1d08dea42907ee8f859d3b2f46 (commit)
       via  0b838ba0d3c60203a52d1a918333846116e607cb (commit)
       via  f77021d7838e33e1662b42776ccc49be4435b1f2 (commit)
       via  632cd6151b871e060d09a79f6b8a283cc0ab469c (commit)
       via  81a2df84a879ca5cbaaa61dffce5c413d920011d (commit)
       via  59b380d3682bb9fca26cae2c70c6c49934823f01 (commit)
       via  8b2247a6ae88fbf16bfd65852feb0216a4ea4dac (commit)
       via  1b01a9d09e5ecf21ff8bd9cce1c20372846a775c (commit)
       via  735f817c7f66813135b4ef576c117aa424a5bdad (commit)
       via  fef88019d325474471a353304499e7919023912e (commit)
       via  99522dd887762e71cbf4d895486f0e2f915eabda (commit)
       via  999736efa5e3aaf06949675c4f77e1ef9cd0d71b (commit)
       via  85d5708e2c44e04b1a148610434de2c040d7142b (commit)
       via  e6b3d50483fb739da2ca83e493a1c30043ba0464 (commit)
       via  fc29e92af2bd2cfe8fa77dd311b9382680fd6324 (commit)
       via  9129a474d3289157a4d8eb761383352dbfc2586e (commit)
       via  3f15151252dd734210582a2ae8923dada661231f (commit)
       via  79ec6c320ec5c24036856fd6b589ba8bf8b26ffc (commit)
       via  e53411af37a32d0c9b14515bd90c1e701c69f6e9 (commit)
       via  edea2125fa0791f920e3dd9e45c8aa0c9bfc6eb5 (commit)
       via  8f5fafa643f2d908b9e97b6d08aeb55c4b96addf (commit)
       via  01f9c1c0adfb37d11133c87056161f1edfba2672 (commit)
       via  ac7aaa887d827f8bdf1c2881d245cc655c6847b7 (commit)
       via  ebb6493b8ff763d42fe99438c8befe48c381b4aa (commit)
       via  c786a61641a965545c2e304b1c946afdedc6dc1a (commit)
       via  1efa5d9d7f699cc3ee636d4e1b50b3fb3a863180 (commit)
       via  e5251c4886f626e6ef9f6ba82771c0e949e0071f (commit)
       via  aaad42c52aed2c3890378511ecb2f97a3731d23a (commit)
       via  4beebf47805d0c3f80872e8f690f09c1658ae4e2 (commit)
       via  792c8b202cffc8fed726f10b3514523b1fc92469 (commit)
       via  8c624c6644563ed9c4fecec8b0b5f5dd115fe7ef (commit)
       via  d1c7f98e910bd19d21a649386f1a8066e4f41677 (commit)
       via  a90c8a06056300e0f9f5ffdae72b8a2ba26346fc (commit)
       via  30570ab2d917dc6adec02ba272ee50c17124b688 (commit)
       via  59908b70a929baf829202197d6e7ab5a3557da32 (commit)
       via  585d1c63d6d0126607f424571e38a4a60683cf4b (commit)
       via  d335ae50bb855b7b302dab852005385c0227dcfb (commit)
       via  8034dbfe87c45eaa2c0aef0e715b86fa79a7c4e3 (commit)
       via  0ddf0f5fa4d9d18599a1642b9f87caaa1f463c5e (commit)
       via  5a75094dfdd5f2307c4a1669e05db70355b08682 (commit)
       via  df5bad72ac8dac07a038f29823a1938bc9bbe72c (commit)
       via  24a865aeec3048620dea967cba9bb1df28cfd052 (commit)
       via  6756ff6dacd40b74676b4243bc12ea02a43f3ae9 (commit)
       via  6722a4d52b519ed768fa70b31cdd10da868cacbc (commit)
       via  ca9e1e2997420dea3e3b14fea010ab0af3d75f32 (commit)
       via  5fe9e2f204d67a9ed65ab8fc2a1feb09f6700b5b (commit)
       via  13eb9e8620e67d7c617423ac1992a720ecdfbf7d (commit)
       via  bf41f8dc2265ca0cd9ffb8b8c11047291e69ca3c (commit)
       via  926a65fa08617be677a93e9e388df0f229b01067 (commit)
       via  1c7930a7ba19706d388e4f8dcf2a55a886b74cd2 (commit)
       via  61b01087195d5d1f875f01c5fd2eac5dc61d012d (commit)
       via  84fcd68d77cc4aba23721e234622c33666e96c49 (commit)
       via  96f093b960839b26ce37d9ff470933eed9c2b135 (commit)
       via  e828a215cc73946fa3681fcd88c3ef76b68272bd (commit)
       via  0bea88f134736d6fd2872f77feaf309aca6c1bc6 (commit)
       via  406cb1fd4af84fcbdf8339cf1afdae2cfb3b7946 (commit)
       via  55689c559b3ac60765940d64a5b51007f94bddf7 (commit)
       via  925ac83b98b02abec3f7f2a70b7c83170f851e29 (commit)
       via  3f47015eab1abd9c7193a9e740f794c6a718c9f7 (commit)
       via  4064b389d13d2861083499517f51d89492156099 (commit)
       via  926985d03e3486f1a83615dc2794d310cb2cb520 (commit)
       via  189f58f73fe02cf2729ab26d6ce8ab6469e82a1c (commit)
       via  1ab0f2e8448a20674bfb8d12d463e5b3fec3ac6e (commit)
       via  dcb32f7928972c3ebe66f13a08560a1e19c62866 (commit)
       via  e25099da714a10dd3bc24be0002f9174fb9610c9 (commit)
       via  2d39d007d30f65589cfe4b671dc91cdab70ed107 (commit)
       via  e9798fc8931856a7eaeee37155600146d7dc7c57 (commit)
       via  59e01319e369a7c8e4f9a326d603dee7e3924c6b (commit)
       via  ad5a633a9e77e561675aca5263853db8161e82fa (commit)
       via  6c5a9b252b7bc062ed807aff342d0314811b5bde (commit)
       via  8320629b004d5fc8194afb5d277a0d9e01299121 (commit)
       via  0eb494bd7c49f0559c870d8a687ad0552f2feeb7 (commit)
       via  f79a424a36d3a5896c43c5cae5d88d690ecbe90e (commit)
       via  75bda54b2b5cdf06f334e72cd554b616a887d1cf (commit)
       via  2b97fc4f4f30bff13b94ad9b25766b4a6b2f6655 (commit)
       via  a1301a0545acc48bf2f94731cb26577806e3c383 (commit)
       via  8bb79638bc658d8e57b15ae1b16d28a08ec06a69 (commit)
       via  40cf6abada7f06648643b14b9b7db21d0fde3b27 (commit)
       via  81b49bb4d72fdfb5db8d7ad5f9b086c489acdb86 (commit)
       via  c4430b49b30dcd74226d272fa3da4812afc2c6f7 (commit)
       via  eeebde9d81c4bbc4e5388db5cd6148ca3589b91e (commit)
       via  4fae538655882db7c085dab798b4fb29c4a9d8f1 (commit)
       via  8c5e6268927737a472348d1ff8ecb2201c76b98a (commit)
       via  cda19a7cbc56ddd67c7d19ec7d072a64477d254b (commit)
       via  c65177c8ea0dfba3aaa84ea1bf2583b2d818d23d (commit)
       via  b5cfd5e541d4bbb7f13ad93392018711e19ba0e5 (commit)
       via  a2158e5b2c17043f0f3aa194009408aa73bd62ce (commit)
       via  85b06e8c212c9733cc77e71d8a72c72161dc34f2 (commit)
       via  4f87326ae6c17e26769b4ae276001b49d5bb3561 (commit)
       via  06c9c2a763326d4b30ff9448f726928538fba94c (commit)
       via  70af8c7c72300e1afe1974de22c117ff5566487d (commit)
       via  03e690228b6f5184d67a4ff3de56a861fcac9a23 (commit)
       via  3665d7750795ee247864c5619301bd1638c31175 (commit)
       via  bb22c63e7bf7544d5a765140cc29b87fd1e2410c (commit)
       via  d749aee2ec681e0304dd53c63f276af98edeaf31 (commit)
       via  f9a545ca4c82e51fc7c47793fec34eb5deb19e46 (commit)
       via  a4f1f8de765810aecff1194c74a108682e3de28e (commit)
       via  287edb431de6ae5d7106dd4e593a193908b9ba9f (commit)
       via  2f0a32d552d19ac9224dda91975fb128487714a6 (commit)
       via  99d7c21284686ba3d021a6d09938b82ea56de783 (commit)
       via  dae1d2e24f993e1eef9ab429326652f40a006dfb (commit)
       via  ed6ec070b25a8995bccb3cec1a63cb111e06a6fb (commit)
       via  97131f9739d60c41a530a52c5f2a2861ba68637e (commit)
       via  8cad081427cd8326318ef1a0dc81c1eefaf73d29 (commit)
       via  309b24ff461b623770e950d6ff12654241bdd39b (commit)
       via  52d165984d1a7784a1a6e0a3b845b19559698203 (commit)
       via  a948a71497441782d409e98f4068bd04cadf581a (commit)
       via  69da5b8b1b3ed9f6c6e21cae8a8a86c8fbf83a76 (commit)
       via  8f95286e55b815323110f79d92f338878930509f (commit)
       via  c4c85ce1694bd421912d1902f2d614c15bebbea1 (commit)
       via  f78cf6ebc22712c470da4af720915b09ae8e8ebe (commit)
       via  e31eaa10fc2fc2730311596b7ee4ac16050efe62 (commit)
       via  4c250f85ed6ad7f697c42137f1e67aadacf73dac (commit)
       via  941eceae0a54d023dce0c43757b0104b8adbcc9c (commit)
       via  79143dc457f23670d860a2fa134b13eb62db490b (commit)
       via  80dd433545aecf82aa178365dbc6e0650e12907b (commit)
       via  d867ca0fabdb5398d6a964aa393fadf678af2bbf (commit)
       via  3eb58c78cacf7686435e963d423c6c035a737bc0 (commit)
       via  e90d2063e0bd98767fdcd38962ad5be6f2eda68e (commit)
       via  0699e756aead6ec1b3a80f5e044d8c3cb35e3280 (commit)
       via  384501f85cc9e66a686a96e349241442af29a56b (commit)
       via  707e700d4861b2c47235183ce6e98d985819dd2d (commit)
       via  67a88d3fd748cc42730e142cbfa79d0b7fb7a813 (commit)
       via  910caee23c188c5b9575d87bf479d9caa3ab8d07 (commit)
       via  e1bf2fce994c07e02466016d94007e509f2fe478 (commit)
       via  179a2105dc0a9c9341d4347b711b62c2bbb9ccfb (commit)
       via  f7a8e38c733ab5695bda72b7b69a803c8d98c80b (commit)
       via  62318bb5e15a45164d9cba4a03c9e5583e0b6aa3 (commit)
       via  a9d549be7404552a13a95db041e7e1da64729341 (commit)
       via  dd3c0d1df47590362b21e7d582df513a98942a54 (commit)
       via  3eb8c8e08c993b1458a6d79f434e0305936bcd14 (commit)
       via  e580e10deec55a34efd3fc2825bd80143af67d4a (commit)
       via  de063908e81bac570c2c485a9d91d496835fbbee (commit)
       via  9723ecc53f8afd751d66d4e2db24f8fd05ffb467 (commit)
       via  6fe98e3c2a669c9dc779980426a81fbe1ddcfff3 (commit)
       via  576f337f42e5c72aec48d8ea1d36ab5059588301 (commit)
       via  edfe1b966d53caf3ed9e17cd525b0d94beff0aaf (commit)
       via  bfd50c768ccf03b2e4f3d3ecbeb5fb344ff79129 (commit)
       via  ae21ebb0f609f8a2aa8ffc3d4b84c465111ec2c3 (commit)
       via  7cf66b7e44e389205ae4344764fbf136550854ce (commit)
       via  fd9334b7d856c4f748919d035b2a4ad3c85b545b (commit)
       via  0c3b69c6e170bee7dd775090af2bdd1cae900080 (commit)
       via  0fdc040591f07f5f876ff2a16ea363e9026346ae (commit)
       via  f5edd310465966137f0cd4e2109d90f7e5d5965f (commit)
       via  73ac6b09eeeebcdb03965076d4aa8a8a7a361ebe (commit)
       via  2826df63af5ce079b537fb8beaa09b0138b7c308 (commit)
       via  3cf7f4bf3f7c6947bab97f269172236dea5b9d52 (commit)
       via  f28bffcf3e0369b89631df178865a2de00145f97 (commit)
       via  2a81a3ad5a7fa790d2b03845b81380f395fb5ac8 (commit)
       via  c55ce18325b71ece62d213ef4ab60e6c95c07da8 (commit)
       via  86a307f08882d02ad443e848e096a30ca14ec918 (commit)
       via  91920024ba2897735eb1228a17a2cd2bd0f82c61 (commit)
       via  4fd41636ae9c11a96b3893cbc939fd4854b64b56 (commit)
       via  a149f9e04c33ce6012a4a3e837c98db6d3074a44 (commit)
       via  4e18854902a306ea6ad390eb90a9f43f49c8222b (commit)
       via  88119987e9f33fa12243eacee75bd9ec4c99f936 (commit)
       via  c9abaf3a9e3e78a875e9722649615339dfb75047 (commit)
       via  70d184ad5dac430b1591166893689ecb5b774777 (commit)
       via  a215fa328b934f3d140e433725b2d7a540b826f5 (commit)
       via  717534f5a10d288081a0d8b40644794cc1b38861 (commit)
      from  52adf933c0bed4753a06632b25a46055d23eb655 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 502100d7b9cd9d2300e78826a3bddd024ef38a74
Merge: 52adf933c0bed4753a06632b25a46055d23eb655 b477df5d4dbce5b72ebd183b83555f62aa3fcec5
Author: Stephen Morris <stephen at isc.org>
Date:   Mon Jun 27 13:00:45 2011 +0100

    [trac1012] Merge branch 'master' into trac1012
    
    Conflicts:
    	doc/guide/bind10-guide.html

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |   19 +
 configure.ac                                       |   39 +-
 doc/guide/bind10-guide.xml                         |   35 +-
 src/bin/auth/Makefile.am                           |   14 +-
 .../config/config_log.cc => bin/auth/auth_log.cc}  |   10 +-
 src/bin/auth/auth_log.h                            |   54 ++
 src/bin/auth/auth_messages.mes                     |  260 ++++++++
 src/bin/auth/auth_srv.cc                           |  173 ++----
 src/bin/auth/auth_srv.h                            |   21 -
 src/bin/auth/benchmarks/Makefile.am                |    3 +
 src/bin/auth/command.cc                            |   37 +-
 src/bin/auth/main.cc                               |   55 +-
 src/bin/auth/statistics.cc                         |   35 +-
 src/bin/auth/statistics.h                          |    7 +-
 src/bin/auth/tests/Makefile.am                     |    4 +
 src/bin/auth/tests/auth_srv_unittest.cc            |    9 -
 src/bin/auth/tests/statistics_unittest.cc          |    3 +-
 src/bin/bind10/Makefile.am                         |    5 +
 src/bin/bind10/run_bind10.sh.in                    |    2 +-
 src/bin/bind10/tests/Makefile.am                   |    8 +
 src/bin/bindctl/Makefile.am                        |    5 +
 src/bin/bindctl/run_bindctl.sh.in                  |    8 +
 src/bin/bindctl/tests/Makefile.am                  |    8 +
 src/bin/cfgmgr/Makefile.am                         |    5 +
 src/bin/cfgmgr/plugins/Makefile.am                 |    6 +
 src/bin/cfgmgr/plugins/tests/Makefile.am           |    2 +-
 src/bin/cfgmgr/tests/Makefile.am                   |   13 +
 src/bin/cmdctl/Makefile.am                         |    5 +
 src/bin/cmdctl/tests/Makefile.am                   |    8 +
 src/bin/loadzone/run_loadzone.sh.in                |    8 +
 src/bin/loadzone/tests/correct/Makefile.am         |    9 +-
 src/bin/loadzone/tests/error/Makefile.am           |    9 +-
 src/bin/msgq/Makefile.am                           |    5 +
 src/bin/msgq/tests/Makefile.am                     |    8 +
 src/bin/resolver/resolverdef.mes                   |    4 +-
 src/bin/stats/Makefile.am                          |    5 +
 ...{stats-httpd-xml.tpl.in => stats-httpd-xml.tpl} |    0 
 ...{stats-httpd-xsd.tpl.in => stats-httpd-xsd.tpl} |    0 
 ...{stats-httpd-xsl.tpl.in => stats-httpd-xsl.tpl} |    0 
 .../{stats-httpd.spec.in => stats-httpd.spec}      |    0 
 .../{stats-schema.spec.in => stats-schema.spec}    |    0 
 src/bin/stats/{stats.spec.in => stats.spec}        |    0 
 src/bin/stats/stats_httpd.py.in                    |    9 +-
 src/bin/stats/tests/Makefile.am                    |    5 +
 src/bin/stats/tests/http/Makefile.am               |    4 +
 src/bin/stats/tests/isc/Makefile.am                |    5 +
 src/bin/stats/tests/isc/cc/Makefile.am             |    5 +
 src/bin/stats/tests/isc/config/Makefile.am         |    5 +
 src/bin/stats/tests/isc/util/Makefile.am           |    5 +
 src/bin/tests/Makefile.am                          |    8 +
 src/bin/xfrin/Makefile.am                          |   16 +-
 src/bin/xfrin/tests/Makefile.am                    |    4 +-
 src/bin/xfrin/xfrin.py.in                          |   60 +-
 src/bin/xfrin/xfrin_messages.mes                   |   91 +++
 src/bin/xfrout/Makefile.am                         |    5 +
 src/bin/xfrout/tests/Makefile.am                   |    6 +-
 src/bin/xfrout/tests/xfrout_test.py.in             |    6 +-
 src/bin/xfrout/xfrout.py.in                        |   83 ++--
 src/bin/zonemgr/Makefile.am                        |    5 +
 src/bin/zonemgr/tests/Makefile.am                  |    8 +
 src/lib/acl/Makefile.am                            |   30 +-
 src/lib/{datasrc/logger.cc => acl/dns.cc}          |   17 +-
 src/lib/acl/dns.h                                  |   89 +++
 src/lib/acl/ip_check.cc                            |  111 +++
 src/lib/acl/ip_check.h                             |  354 ++++++++++
 src/lib/{log/log_formatter.cc => acl/loader.cc}    |   40 +-
 src/lib/acl/loader.h                               |  448 +++++++++++++
 src/lib/acl/logic_check.h                          |  206 ++++++
 src/lib/acl/tests/Makefile.am                      |   15 +-
 src/lib/acl/tests/acl_test.cc                      |   68 +--
 src/lib/acl/tests/creators.h                       |  154 +++++
 .../tests/dns_test.cc}                             |   27 +-
 src/lib/acl/tests/ip_check_unittest.cc             |  588 ++++++++++++++++
 src/lib/acl/tests/loader_test.cc                   |  371 +++++++++++
 src/lib/acl/tests/logcheck.h                       |   91 +++
 src/lib/acl/tests/logic_check_test.cc              |  208 ++++++
 src/lib/acl/tests/run_unittests.cc                 |    3 +-
 src/lib/asiodns/Makefile.am                        |   12 +-
 .../asiodns/{asiodef.mes => asiodns_messages.mes}  |   44 +-
 src/lib/asiodns/io_fetch.cc                        |   28 +-
 src/lib/config/Makefile.am                         |   12 +-
 src/lib/config/ccsession.cc                        |   14 +-
 src/lib/config/ccsession.h                         |   19 +
 src/lib/config/config_log.h                        |    2 +-
 .../config/{configdef.mes => config_messages.mes}  |   64 +-
 src/lib/datasrc/Makefile.am                        |   12 +-
 src/lib/datasrc/cache.cc                           |   17 +-
 .../{messagedef.mes => datasrc_messages.mes}       |  689 ++++++++++----------
 src/lib/datasrc/logger.h                           |    2 +-
 src/lib/dns/python/Makefile.am                     |    3 +
 src/lib/dns/python/tests/Makefile.am               |    5 +
 src/lib/exceptions/exceptions.h                    |   11 +
 src/lib/log/compiler/message.cc                    |    4 +-
 src/lib/log/logger.cc                              |   10 +-
 src/lib/log/logger.h                               |   29 +-
 src/lib/log/logger_impl.cc                         |    2 +-
 src/lib/log/logger_manager.cc                      |    4 +-
 src/lib/log/logger_support.cc                      |   47 +-
 src/lib/log/logger_support.h                       |   24 +-
 src/lib/log/tests/Makefile.am                      |    1 +
 src/lib/log/tests/destination_test.sh.in           |   24 +-
 src/lib/log/tests/local_file_test.sh.in            |   39 +-
 src/lib/log/tests/logger_support_unittest.cc       |   72 ++
 src/lib/log/tests/severity_test.sh.in              |   52 +-
 src/lib/python/Makefile.am                         |    4 +
 src/lib/python/isc/Makefile.am                     |    5 +
 src/lib/python/isc/__init__.py                     |    1 -
 src/lib/python/isc/cc/Makefile.am                  |    5 +
 src/lib/python/isc/cc/tests/Makefile.am            |    8 +
 src/lib/python/isc/config/Makefile.am              |    5 +
 src/lib/python/isc/config/ccsession.py             |   32 +-
 src/lib/python/isc/config/tests/Makefile.am        |   14 +
 src/lib/python/isc/config/tests/ccsession_test.py  |   39 ++-
 src/lib/python/isc/datasrc/Makefile.am             |    5 +
 src/lib/python/isc/datasrc/tests/Makefile.am       |    8 +
 src/lib/python/isc/log/Makefile.am                 |   28 +-
 src/lib/python/isc/log/__init__.py                 |   34 +-
 src/lib/python/isc/log/log.cc                      |  701 ++++++++++++++++++++
 src/lib/python/isc/log/log.py                      |  280 --------
 src/lib/python/isc/log/tests/Makefile.am           |   18 +-
 src/lib/python/isc/log/tests/check_output.sh       |    3 +
 src/lib/python/isc/log/tests/console.out           |    4 +
 src/lib/python/isc/log/tests/log_console.py.in     |   15 +
 src/lib/python/isc/log/tests/log_test.in           |   26 -
 src/lib/python/isc/log/tests/log_test.py           |  350 ++++------
 src/lib/python/isc/net/Makefile.am                 |    5 +
 src/lib/python/isc/net/tests/Makefile.am           |    8 +
 src/lib/python/isc/notify/Makefile.am              |    5 +
 src/lib/python/isc/notify/tests/Makefile.am        |    2 +-
 src/lib/python/isc/testutils/Makefile.am           |    5 +
 src/lib/python/isc/util/Makefile.am                |    5 +
 src/lib/python/isc/util/tests/Makefile.am          |    8 +
 src/lib/util/io/Makefile.am                        |    4 +-
 src/lib/util/pyunittests/Makefile.am               |    3 +
 134 files changed, 5412 insertions(+), 1511 deletions(-)
 copy src/{lib/config/config_log.cc => bin/auth/auth_log.cc} (83%)
 create mode 100644 src/bin/auth/auth_log.h
 create mode 100644 src/bin/auth/auth_messages.mes
 mode change 100644 => 100755 src/bin/bindctl/run_bindctl.sh.in
 mode change 100644 => 100755 src/bin/loadzone/run_loadzone.sh.in
 rename src/bin/stats/{stats-httpd-xml.tpl.in => stats-httpd-xml.tpl} (100%)
 rename src/bin/stats/{stats-httpd-xsd.tpl.in => stats-httpd-xsd.tpl} (100%)
 rename src/bin/stats/{stats-httpd-xsl.tpl.in => stats-httpd-xsl.tpl} (100%)
 rename src/bin/stats/{stats-httpd.spec.in => stats-httpd.spec} (100%)
 rename src/bin/stats/{stats-schema.spec.in => stats-schema.spec} (100%)
 rename src/bin/stats/{stats.spec.in => stats.spec} (100%)
 mode change 100644 => 100755 src/bin/stats/stats_httpd.py.in
 create mode 100644 src/bin/xfrin/xfrin_messages.mes
 copy src/lib/{datasrc/logger.cc => acl/dns.cc} (71%)
 create mode 100644 src/lib/acl/dns.h
 create mode 100644 src/lib/acl/ip_check.cc
 create mode 100644 src/lib/acl/ip_check.h
 copy src/lib/{log/log_formatter.cc => acl/loader.cc} (51%)
 create mode 100644 src/lib/acl/loader.h
 create mode 100644 src/lib/acl/logic_check.h
 create mode 100644 src/lib/acl/tests/creators.h
 copy src/lib/{asiolink/tests/io_socket_unittest.cc => acl/tests/dns_test.cc} (57%)
 create mode 100644 src/lib/acl/tests/ip_check_unittest.cc
 create mode 100644 src/lib/acl/tests/loader_test.cc
 create mode 100644 src/lib/acl/tests/logcheck.h
 create mode 100644 src/lib/acl/tests/logic_check_test.cc
 rename src/lib/asiodns/{asiodef.mes => asiodns_messages.mes} (57%)
 rename src/lib/config/{configdef.mes => config_messages.mes} (54%)
 rename src/lib/datasrc/{messagedef.mes => datasrc_messages.mes} (68%)
 create mode 100644 src/lib/log/tests/logger_support_unittest.cc
 create mode 100644 src/lib/python/isc/log/log.cc
 delete mode 100644 src/lib/python/isc/log/log.py
 create mode 100755 src/lib/python/isc/log/tests/check_output.sh
 create mode 100644 src/lib/python/isc/log/tests/console.out
 create mode 100755 src/lib/python/isc/log/tests/log_console.py.in
 delete mode 100644 src/lib/python/isc/log/tests/log_test.in

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 0a48516..3d3e8d2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,22 @@
+261.	[func]      stephen
+	Add new-style logging messages to b10-auth.
+	(Trac 738, git c021505a1a0d6ecb15a8fd1592b94baff6d115f4)
+
+260.	[func]      stephen
+	Remove comma between message identification and the message
+	text in the new-style logging messages.
+	(Trac 1031, git 1c7930a7ba19706d388e4f8dcf2a55a886b74cd2)
+
+259.	[bug]       stephen
+	Logging now correctly initialized in b10-auth.  Also, fixed
+	bug whereby querying for "version.bind txt ch" would cause
+	b10-auth to crash if BIND 10 was started with the "-v" switch.
+	(Trac 1022,1023, git 926a65fa08617be677a93e9e388df0f229b01067)
+
+258.	[build]		jelte
+	Now builds and runs with Python 3.2
+	(Trac #710, git dae1d2e24f993e1eef9ab429326652f40a006dfb)
+
 257.	[bug]           y-aharen
 	Fixed a bug where an instance of IntervalTimerImpl may be destructed
 	while deadline_timer is holding the handler. This fix addresses
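
Entry 261 covers the conversion of b10-auth from ad-hoc verbose-mode output to
the new-style logging seen throughout the auth_srv.cc changes further down. A
minimal sketch of the pattern, using names introduced by this merge (the
wrapper function itself is illustrative only):

    #include <auth/auth_log.h>   // auth_logger and the DBG_AUTH_* levels;
                                 // pulls in the generated auth_messages.h

    using namespace isc::auth;

    // Old style: output gated on a per-server verbose flag, e.g.
    //   if (verbose_mode_) {
    //       cerr << "[b10-auth] received unexpected response, ignoring" << endl;
    //   }
    //
    // New style: a symbolic message ID logged through the module logger,
    // gated on the logger's debug level rather than the verbose flag.
    void logUnexpectedResponse() {
        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
    }
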
diff --git a/configure.ac b/configure.ac
index 43016d0..8221cdb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -280,6 +280,35 @@ namespace isc {class Bar {Foo foo_;};} ],,
 	[AC_MSG_RESULT(yes)])
 CXXFLAGS="$CXXFLAGS_SAVED"
 
+# Python 3.2 has an unused parameter in one of its headers. This
+# has been reported, but not fixed as of yet, so we check if we need
+# to set -Wno-unused-parameter.
+if test $werror_ok = 1; then
+	CPPFLAGS_SAVED="$CPPFLAGS"
+	CPPFLAGS=${PYTHON_INCLUDES}
+	CXXFLAGS_SAVED="$CXXFLAGS"
+	CXXFLAGS="$CXXFLAGS $B10_CXXFLAGS -Werror"
+	AC_MSG_CHECKING([whether we need -Wno-unused-parameter for python])
+	AC_TRY_COMPILE(
+		[#include <Python.h>],
+		[],
+		[AC_MSG_RESULT(no)],
+		[
+		CXXFLAGS="$CXXFLAGS -Wno-unused-parameter"
+		AC_TRY_COMPILE([#include <Python.h>],
+		[],
+		[AC_MSG_RESULT(yes)
+		 PYTHON_CXXFLAGS="${PYTHON_CXXFLAGS} -Wno-unused-parameter"
+		 AC_SUBST(PYTHON_CXXFLAGS)
+		],
+		[AC_MSG_ERROR([Can't compile against Python.h])]
+                )
+                ]
+	)
+	CXXFLAGS="$CXXFLAGS_SAVED"
+	CPPFLAGS="$CPPFLAGS_SAVED"
+fi
+
 fi				dnl GXX = yes
 
 AM_CONDITIONAL(GCC_WERROR_OK, test $werror_ok = 1)
@@ -858,12 +887,6 @@ AC_OUTPUT([doc/version.ent
            src/bin/zonemgr/run_b10-zonemgr.sh
            src/bin/stats/stats.py
            src/bin/stats/stats_httpd.py
-           src/bin/stats/stats.spec
-           src/bin/stats/stats-schema.spec
-           src/bin/stats/stats-httpd.spec
-           src/bin/stats/stats-httpd-xml.tpl
-           src/bin/stats/stats-httpd-xsd.tpl
-           src/bin/stats/stats-httpd-xsl.tpl
            src/bin/bind10/bind10.py
            src/bin/bind10/run_bind10.sh
            src/bin/bind10/tests/bind10_test.py
@@ -885,8 +908,8 @@ AC_OUTPUT([doc/version.ent
            src/lib/config/tests/data_def_unittests_config.h
            src/lib/python/isc/config/tests/config_test
            src/lib/python/isc/cc/tests/cc_test
-           src/lib/python/isc/log/tests/log_test
            src/lib/python/isc/notify/tests/notify_out_test
+           src/lib/python/isc/log/tests/log_console.py
            src/lib/dns/gen-rdatacode.py
            src/lib/python/bind10_config.py
            src/lib/dns/tests/testdata/gen-wiredata.py
@@ -928,6 +951,7 @@ AC_OUTPUT([doc/version.ent
            chmod +x src/lib/log/tests/destination_test.sh
            chmod +x src/lib/log/tests/severity_test.sh
            chmod +x src/lib/util/python/mkpywrapper.py
+           chmod +x src/lib/python/isc/log/tests/log_console.py
            chmod +x tests/system/conf.sh
           ])
 AC_OUTPUT
@@ -954,6 +978,7 @@ Flags:
   B10_CXXFLAGS:  $B10_CXXFLAGS
 dnl includes too
   Python:        ${PYTHON_INCLUDES}
+                 ${PYTHON_CXXFLAGS}
                  ${PYTHON_LDFLAGS}
                  ${PYTHON_LIB}
   Boost:         ${BOOST_INCLUDES}
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index e80bc7f..7d1a006 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -13,7 +13,7 @@
     <subtitle>Administrator Reference for BIND 10</subtitle>
 
     <copyright>
-      <year>2010</year><holder>Internet Systems Consortium, Inc.</holder>
+      <year>2010-2011</year><holder>Internet Systems Consortium, Inc.</holder>
     </copyright>
 
     <abstract>
@@ -81,15 +81,13 @@
 
       <para>
 	BIND 10 uses the Botan crypto library for C++. It requires
-	at least Botan version 1.8. To build BIND 10, install the
-	Botan libraries and development include headers.
+	at least Botan version 1.8.
       </para>
 
-<!--
-TODO
-Debian and Ubuntu:
- libgmp3-dev and libbz2-dev required for botan too
--->
+      <para>
+	BIND 10 uses the log4cplus C++ logging library. It requires
+	at least log4cplus version 1.0.3.
+      </para>
 
       <para>
 	The authoritative server requires SQLite 3.3.9 or newer.
@@ -303,6 +301,12 @@ var/
 
     <section>
       <title>Building Requirements</title>
+
+        <para>
+          In addition to the run-time requirements, building BIND 10
+          from source code requires various development include headers.
+        </para>
+
         <note>
           <simpara>
             Some operating systems have split their distribution packages into
@@ -320,6 +324,19 @@ var/
         </para>
 
         <para>
+	  To build BIND 10, also install the Botan (at least version
+	  1.8) and the log4cplus (at least version 1.0.3)
+          development include headers.
+        </para>
+
+<!--
+TODO
+Debian and Ubuntu:
+ libgmp3-dev and libbz2-dev required for botan too
+-->
+
+        <para>
+<!-- TODO: is this needed at build time? test time? -->
 	  The Python Library and Python _sqlite3 module are required to
           enable the Xfrout and Xfrin support.
         </para>
@@ -333,7 +350,7 @@ var/
           Building BIND 10 also requires a C++ compiler and
           standard development headers, make, and pkg-config.
           BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
-          4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+          4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
         </para>
     </section>
 
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 9c52504..64136c1 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -16,7 +16,8 @@ endif
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
 
-CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES  = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES += auth_messages.h auth_messages.cc
 
 man_MANS = b10-auth.8
 EXTRA_DIST = $(man_MANS) b10-auth.xml
@@ -34,16 +35,25 @@ auth.spec: auth.spec.pre
 spec_config.h: spec_config.h.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
 
-BUILT_SOURCES = spec_config.h
+auth_messages.h auth_messages.cc: auth_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/auth/auth_messages.mes
+
+BUILT_SOURCES = spec_config.h auth_messages.h auth_messages.cc
+
 pkglibexec_PROGRAMS = b10-auth
 b10_auth_SOURCES = query.cc query.h
 b10_auth_SOURCES += auth_srv.cc auth_srv.h
+b10_auth_SOURCES += auth_log.cc auth_log.h
 b10_auth_SOURCES += change_user.cc change_user.h
 b10_auth_SOURCES += auth_config.cc auth_config.h
 b10_auth_SOURCES += command.cc command.h
 b10_auth_SOURCES += common.h common.cc
 b10_auth_SOURCES += statistics.cc statistics.h
 b10_auth_SOURCES += main.cc
+
+nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
+EXTRA_DIST += auth_messages.mes
+
 b10_auth_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
 b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
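
The new auth_messages.h/auth_messages.cc rule above runs the message compiler
over auth_messages.mes at build time; the results are listed in BUILT_SOURCES
and compiled into b10-auth as nodist sources. Roughly, the generated header is
expected to declare one symbolic ID per "% AUTH_..." entry in the .mes file
(a sketch only; the exact generated form is an assumption here):

    // auth_messages.h (sketch of generated content)
    #include <log/message_types.h>

    namespace isc {
    namespace auth {

    // One MessageID per message definition, used as the third argument
    // to LOG_DEBUG/LOG_ERROR at the logging sites.
    extern const isc::log::MessageID AUTH_AXFR_ERROR;
    extern const isc::log::MessageID AUTH_PACKET_RECEIVED;
    extern const isc::log::MessageID AUTH_SERVER_STARTED;
    // ...

    } // namespace auth
    } // namespace isc
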
diff --git a/src/bin/auth/auth_log.cc b/src/bin/auth/auth_log.cc
new file mode 100644
index 0000000..d41eaea
--- /dev/null
+++ b/src/bin/auth/auth_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the top-level component of b10-auth.
+
+#include "auth_log.h"
+
+namespace isc {
+namespace auth {
+
+isc::log::Logger auth_logger("auth");
+
+} // namespace auth
+} // namespace isc
+
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
new file mode 100644
index 0000000..5205624
--- /dev/null
+++ b/src/bin/auth/auth_log.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_LOG__H
+#define __AUTH_LOG__H
+
+#include <log/macros.h>
+#include <auth/auth_messages.h>
+
+namespace isc {
+namespace auth {
+
+/// \brief Auth Logging
+///
+/// Defines the levels used to output debug messages in the "auth" part of
+/// the b10-auth program.  Higher numbers equate to more verbose (and detailed)
+/// output.
+
+// Debug messages indicating normal startup are logged at this debug level.
+const int DBG_AUTH_START = 10;
+
+// Debug level used to log setting information (such as configuration changes).
+const int DBG_AUTH_OPS = 30;
+
+// Trace detailed operations, including errors raised when processing invalid
+// packets.  (These are not logged at severities of WARN or higher for fear
+// that a set of deliberately invalid packets sent to the authoritative server
+// could overwhelm the logging.)
+const int DBG_AUTH_DETAIL = 50;
+
+// This level is used to log the contents of packets received and sent.
+const int DBG_AUTH_MESSAGES = 70;
+
+/// Define the logger for the "auth" module part of b10-auth.  We could define
+/// a logger in each file, but we would want to define a common name to avoid
+/// spelling mistakes, so it is just one small step from there to define a
+/// module-common logger.
+extern isc::log::Logger auth_logger;
+
+} // namespace auth
+} // namespace isc
+
+#endif // __AUTH_LOG__H
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
new file mode 100644
index 0000000..8553d17
--- /dev/null
+++ b/src/bin/auth/auth_messages.mes
@@ -0,0 +1,260 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::auth
+
+% AUTH_AXFR_ERROR error handling AXFR request: %1
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+
+% AUTH_AXFR_UDP AXFR query received over UDP
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+
+% AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+
+% AUTH_CONFIG_CHANNEL_CREATED configuration session channel created
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_STARTED configuration session channel started
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+
+% AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+
+% AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+
+% AUTH_DATA_SOURCE data source database file: %1
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+
+% AUTH_DNS_SERVICES_CREATED DNS services created
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+
+% AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+
+% AUTH_LOAD_TSIG loading TSIG keys
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_LOAD_ZONE loaded zone %1/%2
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+
+% AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+
+% AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+
+% AUTH_NO_STATS_SESSION session interface for statistics is not available
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+
+% AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+
+% AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+
+% AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+
+% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+
+% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+
+% AUTH_PACKET_RECEIVED message received:\n%1
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_PROCESS_FAIL message processing failure: %1
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+
+The server will return a SERVFAIL error code to the sender of the packet.
+However, this message indicates a potential error in the server.
+Please open a bug ticket for this issue.
+
+% AUTH_RECEIVED_COMMAND command '%1' received
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+
+% AUTH_RECEIVED_SENDSTATS command 'sendstats' received
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+
+% AUTH_RESPONSE_RECEIVED received response message, ignoring
+This debug message is output when the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+
+% AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SERVER_CREATED server created
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+
+% AUTH_SERVER_FAILED server failed: %1
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+
+% AUTH_SERVER_STARTED server started
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+
+% AUTH_SQLITE3 nothing to do for loading sqlite3
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+
+% AUTH_STATS_CHANNEL_CREATED STATS session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel.  It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_STATS_COMMS communication error in sending statistics data: %1
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+
+% AUTH_STATS_TIMEOUT timeout while sending statistics data: %1
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+
+% AUTH_STATS_TIMER_DISABLED statistics timer has been disabled
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+
+% AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+
+% AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+
+% AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process.  It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+
+% AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process.  It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+
+% AUTH_ZONEMGR_ERROR received error response from zone manager: %1
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+
+
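
The %1, %2 markers in the message text above are positional placeholders,
filled at the logging site with chained .arg() calls, as the auth_srv.cc
changes below show. For example (argument values shown are illustrative):

    // AUTH_PACKET_PROTOCOL_ERROR is "DNS packet protocol error: %1. Returning %2"
    LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
              .arg(error.getRcode().toText())   // substituted for %1, e.g. "FORMERR"
              .arg(error.what());               // substituted for %2, the parse error text
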
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 9e01155..f29fd05 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -59,6 +59,7 @@
 #include <auth/auth_srv.h>
 #include <auth/query.h>
 #include <auth/statistics.h>
+#include <auth/auth_log.h>
 
 using namespace std;
 
@@ -104,7 +105,6 @@ public:
 
     /// These members are public because AuthSrv accesses them directly.
     ModuleCCSession* config_session_;
-    bool verbose_mode_;
     AbstractSession* xfrin_session_;
 
     /// In-memory data source.  Currently class IN only for simplicity.
@@ -143,11 +143,11 @@ private:
 
 AuthSrvImpl::AuthSrvImpl(const bool use_cache,
                          AbstractXfroutClient& xfrout_client) :
-    config_session_(NULL), verbose_mode_(false),
+    config_session_(NULL),
     xfrin_session_(NULL),
     memory_datasrc_class_(RRClass::IN()),
     statistics_timer_(io_service_),
-    counters_(verbose_mode_),
+    counters_(),
     keyring_(NULL),
     xfrout_connected_(false),
     xfrout_client_(xfrout_client)
@@ -251,7 +251,7 @@ public:
 
 void
 makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
-                 const Rcode& rcode, const bool verbose_mode,
+                 const Rcode& rcode, 
                  std::auto_ptr<TSIGContext> tsig_context =
                  std::auto_ptr<TSIGContext>())
 {
@@ -289,22 +289,9 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
     } else {
         message->toWire(renderer);
     }
-
-    if (verbose_mode) {
-        cerr << "[b10-auth] sending an error response (" <<
-            renderer.getLength() << " bytes):\n" << message->toText() << endl;
-    }
-}
-}
-
-void
-AuthSrv::setVerbose(const bool on) {
-    impl_->verbose_mode_ = on;
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
+              .arg(message->toText());
 }
-
-bool
-AuthSrv::getVerbose() const {
-    return (impl_->verbose_mode_);
 }
 
 IOService&
@@ -362,15 +349,12 @@ AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
         isc_throw(InvalidParameter,
                   "Memory data source is not supported for RR class "
                   << rrclass);
-    }
-    if (impl_->verbose_mode_) {
-        if (!impl_->memory_datasrc_ && memory_datasrc) {
-            cerr << "[b10-auth] Memory data source is enabled for class "
-                 << rrclass << endl;
-        } else if (impl_->memory_datasrc_ && !memory_datasrc) {
-            cerr << "[b10-auth] Memory data source is disabled for class "
-                 << rrclass << endl;
-        }
+    } else if (!impl_->memory_datasrc_ && memory_datasrc) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
+                  .arg(rrclass);
+    } else if (impl_->memory_datasrc_ && !memory_datasrc) {
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
+                  .arg(rrclass);
     }
     impl_->memory_datasrc_ = memory_datasrc;
 }
@@ -392,18 +376,13 @@ AuthSrv::setStatisticsTimerInterval(uint32_t interval) {
     }
     if (interval == 0) {
         impl_->statistics_timer_.cancel();
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
     } else {
         impl_->statistics_timer_.setup(boost::bind(&AuthSrv::submitStatistics,
                                                    this),
                                        interval * 1000);
-    }
-    if (impl_->verbose_mode_) {
-        if (interval == 0) {
-            cerr << "[b10-auth] Disabled statistics timer" << endl;
-        } else {
-            cerr << "[b10-auth] Set statistics timer to " << interval
-                 << " seconds" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
+                  .arg(interval);
     }
 }
 
@@ -420,17 +399,13 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
 
         // Ignore all responses.
         if (message->getHeaderFlag(Message::HEADERFLAG_QR)) {
-            if (impl_->verbose_mode_) {
-                cerr << "[b10-auth] received unexpected response, ignoring"
-                     << endl;
-            }
+            LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
             server->resume(false);
             return;
         }
     } catch (const Exception& ex) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] DNS packet exception: " << ex.what() << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_HEADER_PARSE_FAIL)
+                  .arg(ex.what());
         server->resume(false);
         return;
     }
@@ -439,27 +414,21 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
         // Parse the message.
         message->fromWire(request_buffer);
     } catch (const DNSProtocolError& error) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] returning " <<  error.getRcode().toText()
-                 << ": " << error.what() << endl;
-        }
-        makeErrorMessage(message, buffer, error.getRcode(),
-                         impl_->verbose_mode_);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+                  .arg(error.getRcode().toText()).arg(error.what());
+        makeErrorMessage(message, buffer, error.getRcode());
         server->resume(true);
         return;
     } catch (const Exception& ex) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] returning SERVFAIL: " << ex.what() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(),
-                         impl_->verbose_mode_);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+                  .arg(ex.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL());
         server->resume(true);
         return;
     } // other exceptions will be handled at a higher layer.
 
-    if (impl_->verbose_mode_) {
-        cerr << "[b10-auth] received a message:\n" << message->toText() << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_PACKET_RECEIVED)
+              .arg(message->toText());
 
     // Perform further protocol-level validation.
     // TSIG first
@@ -481,20 +450,16 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
 
     bool sendAnswer = true;
     if (tsig_error != TSIGError::NOERROR()) {
-        makeErrorMessage(message, buffer, tsig_error.toRcode(),
-                         impl_->verbose_mode_, tsig_context);
+        makeErrorMessage(message, buffer, tsig_error.toRcode(), tsig_context);
     } else if (message->getOpcode() == Opcode::NOTIFY()) {
         sendAnswer = impl_->processNotify(io_message, message, buffer,
                                           tsig_context);
     } else if (message->getOpcode() != Opcode::QUERY()) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] unsupported opcode" << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::NOTIMP(),
-                         impl_->verbose_mode_, tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE)
+                  .arg(message->getOpcode().toText());
+        makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
     } else if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
-        makeErrorMessage(message, buffer, Rcode::FORMERR(),
-                         impl_->verbose_mode_, tsig_context);
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
     } else {
         ConstQuestionPtr question = *message->beginQuestion();
         const RRType &qtype = question->getType();
@@ -502,8 +467,7 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
             sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
                                                  tsig_context);
         } else if (qtype == RRType::IXFR()) {
-            makeErrorMessage(message, buffer, Rcode::NOTIMP(),
-                             impl_->verbose_mode_, tsig_context);
+            makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
         } else {
             sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
                                                    tsig_context);
@@ -550,11 +514,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
             data_sources_.doQuery(query);
         }
     } catch (const Exception& ex) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] Internal error, returning SERVFAIL: " <<
-                ex.what() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+        LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(ex.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL());
         return (true);
     }
 
@@ -567,12 +528,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
     } else {
         message->toWire(renderer);
     }
-
-    if (verbose_mode_) {
-        cerr << "[b10-auth] sending a response ("
-             << renderer.getLength()
-             << " bytes):\n" << message->toText() << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_NORMAL_RESPONSE)
+              .arg(renderer.getLength()).arg(message->toText());
 
     return (true);
 }
@@ -586,11 +543,8 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
     incCounter(io_message.getSocket().getProtocol());
 
     if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] AXFR query over UDP isn't allowed" << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_UDP);
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
 
@@ -613,12 +567,9 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
             xfrout_connected_ = false;
         }
 
-        if (verbose_mode_) {
-            cerr << "[b10-auth] Error in handling XFR request: " << err.what()
-                 << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+                  .arg(err.what());
+        makeErrorMessage(message, buffer, Rcode::SERVFAIL(), tsig_context);
         return (true);
     }
 
@@ -633,22 +584,16 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
     // The incoming notify must contain exactly one question for SOA of the
     // zone name.
     if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
-        if (verbose_mode_) {
-                cerr << "[b10-auth] invalid number of questions in notify: "
-                     << message->getRRCount(Message::SECTION_QUESTION) << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_QUESTIONS)
+                  .arg(message->getRRCount(Message::SECTION_QUESTION));
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
     ConstQuestionPtr question = *message->beginQuestion();
     if (question->getType() != RRType::SOA()) {
-        if (verbose_mode_) {
-                cerr << "[b10-auth] invalid question RR type in notify: "
-                     << question->getType() << endl;
-        }
-        makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
-                         tsig_context);
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_RRTYPE)
+                  .arg(question->getType().toText());
+        makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
         return (true);
     }
 
@@ -664,10 +609,7 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
     // silent about such cases, but there doesn't seem to be anything we can
     // improve at the primary server side by sending an error anyway.
     if (xfrin_session_ == NULL) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] "
-                "session interface for xfrin is not available" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
         return (false);
     }
 
@@ -693,16 +635,12 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
         int rcode;
         parsed_answer = parseAnswer(rcode, answer);
         if (rcode != 0) {
-            if (verbose_mode_) {
-                cerr << "[b10-auth] failed to notify Zonemgr: "
-                     << parsed_answer->str() << endl;
-            }
+            LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
+                      .arg(parsed_answer->str());
             return (false);
         }
     } catch (const Exception& ex) {
-        if (verbose_mode_) {
-            cerr << "[b10-auth] failed to notify Zonemgr: " << ex.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
         return (false);
     }
 
@@ -762,10 +700,7 @@ AuthSrvImpl::setDbFile(ConstElementPtr config) {
     } else {
         return (answer);
     }
-
-    if (verbose_mode_) {
-        cerr << "[b10-auth] Data source database file: " << db_file_ << endl;
-    }
+    LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file_);
 
     // create SQL data source
     // Note: the following step is tricky to be exception-safe and to ensure
@@ -795,9 +730,7 @@ AuthSrv::updateConfig(ConstElementPtr new_config) {
         }
         return (impl_->setDbFile(new_config));
     } catch (const isc::Exception& error) {
-        if (impl_->verbose_mode_) {
-            cerr << "[b10-auth] error: " << error.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_CONFIG_UPDATE_FAIL).arg(error.what());
         return (isc::config::createAnswer(1, error.what()));
     }
 }
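
For reference, the conversion above follows one pattern throughout auth_srv.cc: each verbose_mode_ check with ad-hoc cerr output is replaced by a call on the new auth logger, and makeErrorMessage() loses its verbosity argument. The condensed sketch below is illustrative only (not part of the committed diff); it assumes the auth_logger object and the DBG_AUTH_*/AUTH_* identifiers that auth_log.h and the generated auth_messages.h introduce on this branch.

    // Sketch only: the logging calls as used after the conversion.
    #include <auth/auth_log.h>              // auth_logger, DBG_AUTH_* levels
    #include <exceptions/exceptions.h>      // isc::Exception

    using namespace isc::auth;

    void
    logParseFailure(const isc::Exception& ex) {
        // Debug output: emitted only when the configured debug level reaches
        // DBG_AUTH_DETAIL; .arg() fills the message's %1 placeholder.
        LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
            .arg(ex.what());
    }

    void
    logZonemgrFailure(const isc::Exception& ex) {
        // Error output: unconditional, replacing the former verbose-only text.
        LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
    }

In effect the per-message debug levels, as used here (DBG_AUTH_START for start-up, DBG_AUTH_OPS for operational events, DBG_AUTH_DETAIL for per-packet detail, DBG_AUTH_MESSAGES for full message dumps), take over the role of the old all-or-nothing verbose flag.
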
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 19c97b5..7eede97 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -124,27 +124,6 @@ public:
                         isc::util::OutputBufferPtr buffer,
                         isc::asiodns::DNSServer* server);
 
-    /// \brief Set verbose flag
-    ///
-    /// \param on The new value of the verbose flag
-
-    /// \brief Enable or disable verbose logging.
-    ///
-    /// This method never throws an exception.
-    ///
-    /// \param on \c true to enable verbose logging; \c false to disable
-    /// verbose logging.
-    void setVerbose(const bool on);
-
-    /// \brief Returns the logging verbosity of the \c AuthSrv object.
-    ///
-    /// This method never throws an exception.
-    ///
-    /// \return \c true if verbose logging is enabled; otherwise \c false.
-
-    /// \brief Get the current value of the verbose flag
-    bool getVerbose() const;
-
     /// \brief Updates the data source for the \c AuthSrv object.
     ///
     /// This method installs or replaces the data source that the \c AuthSrv
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 77d171f..cf3fe4a 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -12,6 +12,9 @@ query_bench_SOURCES += ../query.h  ../query.cc
 query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
 query_bench_SOURCES += ../auth_config.h ../auth_config.cc
 query_bench_SOURCES += ../statistics.h ../statistics.cc
+query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+
+nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
 
 query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
 query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index eafcae8..fe3d729 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -27,16 +27,18 @@
 
 #include <config/ccsession.h>
 
+#include <auth/auth_log.h>
 #include <auth/auth_srv.h>
 #include <auth/command.h>
 
-using namespace std;
-using boost::shared_ptr;
 using boost::scoped_ptr;
-using namespace isc::dns;
+using boost::shared_ptr;
+using namespace isc::auth;
+using namespace isc::config;
 using namespace isc::data;
 using namespace isc::datasrc;
-using namespace isc::config;
+using namespace isc::dns;
+using namespace std;
 
 namespace {
 /// An exception that is thrown if an error occurs while handling a command
@@ -115,9 +117,7 @@ public:
 class SendStatsCommand : public AuthCommand {
 public:
     virtual void exec(AuthSrv& server, isc::data::ConstElementPtr) {
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] command 'sendstats' received" << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_SENDSTATS);
         server.submitStatistics();
     }
 };
@@ -140,11 +140,8 @@ public:
                                                       oldzone->getOrigin()));
         newzone->load(oldzone->getFileName());
         oldzone->swap(*newzone);
-
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] Loaded zone '" << newzone->getOrigin()
-                 << "'/" << newzone->getClass() << endl;
-        }
+        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
+                  .arg(newzone->getOrigin()).arg(newzone->getClass());
     }
 
 private:
@@ -164,10 +161,7 @@ private:
         ConstElementPtr datasrc_elem = args->get("datasrc");
         if (datasrc_elem) {
             if (datasrc_elem->stringValue() == "sqlite3") {
-                if (server.getVerbose()) {
-                    cerr << "[b10-auth] Nothing to do for loading sqlite3"
-                         << endl;
-                }
+                LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_SQLITE3);
                 return (false);
             } else if (datasrc_elem->stringValue() != "memory") {
                 // (note: at this point it's guaranteed that datasrc_elem
@@ -233,18 +227,13 @@ ConstElementPtr
 execAuthServerCommand(AuthSrv& server, const string& command_id,
                       ConstElementPtr args)
 {
-    if (server.getVerbose()) {
-        cerr << "[b10-auth] Received '" << command_id << "' command" << endl;
-    }
-
+    LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_COMMAND).arg(command_id);
     try {
         scoped_ptr<AuthCommand>(createAuthCommand(command_id))->exec(server,
                                                                      args);
     } catch (const isc::Exception& ex) {
-        if (server.getVerbose()) {
-            cerr << "[b10-auth] Command '" << command_id
-                 << "' execution failed: " << ex.what() << endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_COMMAND_FAILED).arg(command_id)
+                                                   .arg(ex.what());
         return (createAnswer(1, ex.what()));
     }
 
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 0324c6e..c8f6762 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,25 +44,26 @@
 #include <auth/command.h>
 #include <auth/change_user.h>
 #include <auth/auth_srv.h>
+#include <auth/auth_log.h>
 #include <asiodns/asiodns.h>
 #include <asiolink/asiolink.h>
-#include <log/dummylog.h>
+#include <log/logger_support.h>
 #include <server_common/keyring.h>
 
 using namespace std;
-using namespace isc::data;
+using namespace isc::asiodns;
+using namespace isc::asiolink;
+using namespace isc::auth;
 using namespace isc::cc;
 using namespace isc::config;
+using namespace isc::data;
 using namespace isc::dns;
+using namespace isc::log;
 using namespace isc::util;
 using namespace isc::xfr;
-using namespace isc::asiolink;
-using namespace isc::asiodns;
 
 namespace {
 
-bool verbose_mode = false;
-
 /* need global var for config/command handlers.
  * todo: turn this around, and put handlers in the authserver
  * class itself? */
@@ -88,6 +89,7 @@ usage() {
     cerr << "\t-v: verbose output" << endl;
     exit(1);
 }
+
 } // end of anonymous namespace
 
 int
@@ -95,6 +97,7 @@ main(int argc, char* argv[]) {
     int ch;
     const char* uid = NULL;
     bool cache = true;
+    bool verbose = false;
 
     while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
         switch (ch) {
@@ -105,8 +108,7 @@ main(int argc, char* argv[]) {
             uid = optarg;
             break;
         case 'v':
-            verbose_mode = true;
-            isc::log::denabled = true;
+            verbose = true;
             break;
         case '?':
         default:
@@ -118,6 +120,11 @@ main(int argc, char* argv[]) {
         usage();
     }
 
+    // Initialize logging.  If verbose, we'll use maximum verbosity.
+    isc::log::initLogger("b10-auth",
+                         (verbose ? isc::log::DEBUG : isc::log::INFO),
+                         isc::log::MAX_DEBUG_LEVEL, NULL);
+
     int ret = 0;
 
     // XXX: we should eventually pass io_service here.
@@ -138,8 +145,7 @@ main(int argc, char* argv[]) {
         }
 
         auth_server = new AuthSrv(cache, xfrout_client);
-        auth_server->setVerbose(verbose_mode);
-        cout << "[b10-auth] Server created." << endl;
+        LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
 
         SimpleCallback* checkin = auth_server->getCheckinProvider();
         IOService& io_service = auth_server->getIOService();
@@ -148,10 +154,10 @@ main(int argc, char* argv[]) {
 
         DNSService dns_service(io_service, checkin, lookup, answer);
         auth_server->setDNSService(dns_service);
-        cout << "[b10-auth] DNSServices created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
 
         cc_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Configuration session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
 
         // We delay starting listening to new commands/config just before we
         // go into the main loop to avoid confusion due to mixture of
@@ -161,19 +167,19 @@ main(int argc, char* argv[]) {
         config_session = new ModuleCCSession(specfile, *cc_session,
                                              my_config_handler,
                                              my_command_handler, false);
-        cout << "[b10-auth] Configuration channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
 
         xfrin_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Xfrin session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
         xfrin_session->establish(NULL);
         xfrin_session_established = true;
-        cout << "[b10-auth] Xfrin session channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
 
         statistics_session = new Session(io_service.get_io_service());
-        cout << "[b10-auth] Statistics session channel created." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_CREATED);
         statistics_session->establish(NULL);
         statistics_session_established = true;
-        cout << "[b10-auth] Statistics session channel established." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_ESTABLISHED);
 
         auth_server->setXfrinSession(xfrin_session);
         auth_server->setStatisticsSession(statistics_session);
@@ -182,33 +188,34 @@ main(int argc, char* argv[]) {
         // all initial configurations, but as a short term workaround we
         // handle the traditional "database_file" setup by directly calling
         // updateConfig().
-        // if server load configure failed, we won't exit, give user second chance
-        // to correct the configure.
+        // If loading the configuration fails we don't exit, so the user
+        // gets a second chance to correct it.
         auth_server->setConfigSession(config_session);
         try {
             configureAuthServer(*auth_server, config_session->getFullConfig());
             auth_server->updateConfig(ElementPtr());
         } catch (const AuthConfigError& ex) {
-            cout << "[bin10-auth] Server load config failed:" << ex.what() << endl;
+            LOG_ERROR(auth_logger, AUTH_CONFIG_LOAD_FAIL).arg(ex.what());
         }
 
         if (uid != NULL) {
             changeUser(uid);
         }
 
-        cout << "[b10-auth] Loading TSIG keys" << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_LOAD_TSIG);
         isc::server_common::initKeyring(*config_session);
         auth_server->setTSIGKeyRing(&isc::server_common::keyring);
 
         // Now start asynchronous read.
         config_session->start();
-        cout << "[b10-auth] Configuration channel started." << endl;
+        LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_STARTED);
 
-        cout << "[b10-auth] Server started." << endl;
+        // Successfully initialized.
+        LOG_INFO(auth_logger, AUTH_SERVER_STARTED);
         io_service.run();
 
     } catch (const std::exception& ex) {
-        cerr << "[b10-auth] Server failed: " << ex.what() << endl;
+        LOG_FATAL(auth_logger, AUTH_SERVER_FAILED).arg(ex.what());
         ret = 1;
     }
 
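
A note on the -v handling above: the option no longer flips a global verbose flag read throughout the code; it only selects the severity handed to isc::log::initLogger(), and the per-call debug levels do the rest. A minimal sketch of that mapping, reusing the call as it appears in main.cc (illustrative only, not additional committed code):

    #include <log/logger_support.h>

    void
    initAuthLogging(const bool verbose) {
        // With -v: DEBUG severity and all debug levels up to MAX_DEBUG_LEVEL.
        // Without -v: INFO severity, so LOG_DEBUG output is suppressed.
        isc::log::initLogger("b10-auth",
                             verbose ? isc::log::DEBUG : isc::log::INFO,
                             isc::log::MAX_DEBUG_LEVEL, NULL);
    }
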
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 415aa14..76e5007 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -13,6 +13,7 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <auth/statistics.h>
+#include <auth/auth_log.h>
 
 #include <cc/data.h>
 #include <cc/session.h>
@@ -20,6 +21,8 @@
 #include <sstream>
 #include <iostream>
 
+using namespace isc::auth;
+
 // TODO: We need a namespace ("auth_server"?) to hold
 // AuthSrv and AuthCounters.
 
@@ -29,10 +32,7 @@ private:
     AuthCountersImpl(const AuthCountersImpl& source);
     AuthCountersImpl& operator=(const AuthCountersImpl& source);
 public:
-    // References verbose_mode flag in AuthSrvImpl
-    // TODO: Fix this short term workaround for logging
-    // after we have logging framework
-    AuthCountersImpl(const bool& verbose_mode);
+    AuthCountersImpl();
     ~AuthCountersImpl();
     void inc(const AuthCounters::CounterType type);
     bool submitStatistics() const;
@@ -42,15 +42,13 @@ public:
 private:
     std::vector<uint64_t> counters_;
     isc::cc::AbstractSession* statistics_session_;
-    const bool& verbose_mode_;
 };
 
-AuthCountersImpl::AuthCountersImpl(const bool& verbose_mode) :
+AuthCountersImpl::AuthCountersImpl() :
     // initialize counter
     // size: AuthCounters::COUNTER_TYPES, initial value: 0
     counters_(AuthCounters::COUNTER_TYPES, 0),
-    statistics_session_(NULL),
-    verbose_mode_(verbose_mode)
+    statistics_session_(NULL)
 {}
 
 AuthCountersImpl::~AuthCountersImpl()
@@ -64,11 +62,7 @@ AuthCountersImpl::inc(const AuthCounters::CounterType type) {
 bool
 AuthCountersImpl::submitStatistics() const {
     if (statistics_session_ == NULL) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "session interface for statistics"
-                      << " is not available" << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_NO_STATS_SESSION);
         return (false);
     }
     std::stringstream statistics_string;
@@ -95,18 +89,10 @@ AuthCountersImpl::submitStatistics() const {
         // currently it just returns empty message
         statistics_session_->group_recvmsg(env, answer, false, seq);
     } catch (const isc::cc::SessionError& ex) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "communication error in sending statistics data: "
-                      << ex.what() << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_STATS_COMMS).arg(ex.what());
         return (false);
     } catch (const isc::cc::SessionTimeout& ex) {
-        if (verbose_mode_) {
-            std::cerr << "[b10-auth] "
-                      << "timeout happened while sending statistics data: "
-                      << ex.what() << std::endl;
-        }
+        LOG_ERROR(auth_logger, AUTH_STATS_TIMEOUT).arg(ex.what());
         return (false);
     }
     return (true);
@@ -125,8 +111,7 @@ AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
     return (counters_.at(type));
 }
 
-AuthCounters::AuthCounters(const bool& verbose_mode) :
-    impl_(new AuthCountersImpl(verbose_mode))
+AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
 {}
 
 AuthCounters::~AuthCounters() {
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 9e5240e..5bf6436 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -61,15 +61,10 @@ public:
     };
     /// The constructor.
     ///
-    /// \param verbose_mode reference to verbose_mode_ of AuthSrvImpl
-    ///
     /// This constructor is mostly exception free. But it may still throw
     /// a standard exception if memory allocation fails inside the method.
     ///
-    /// \todo Fix this short term workaround for logging
-    /// after we have logging framework.
-    ///
-    AuthCounters(const bool& verbose_mode);
+    AuthCounters();
     /// The destructor.
     ///
     /// This method never throws an exception.
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index a4620f5..71520c2 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -22,6 +22,7 @@ TESTS += run_unittests
 run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
 run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
 run_unittests_SOURCES += ../auth_srv.h ../auth_srv.cc
+run_unittests_SOURCES += ../auth_log.h ../auth_log.cc
 run_unittests_SOURCES += ../query.h ../query.cc
 run_unittests_SOURCES += ../change_user.h ../change_user.cc
 run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
@@ -36,6 +37,9 @@ run_unittests_SOURCES += query_unittest.cc
 run_unittests_SOURCES += change_user_unittest.cc
 run_unittests_SOURCES += statistics_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
+
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 run_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index d922901..2b20d65 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -190,15 +190,6 @@ TEST_F(AuthSrvTest, unsupportedRequest) {
     unsupportedRequest();
 }
 
-// Simple API check
-TEST_F(AuthSrvTest, verbose) {
-    EXPECT_FALSE(server.getVerbose());
-    server.setVerbose(true);
-    EXPECT_TRUE(server.getVerbose());
-    server.setVerbose(false);
-    EXPECT_FALSE(server.getVerbose());
-}
-
 // Multiple questions.  Should result in FORMERR.
 TEST_F(AuthSrvTest, multiQuestion) {
     multiQuestion();
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 062b70d..9a3dded 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -69,13 +69,12 @@ private:
     };
 
 protected:
-    AuthCountersTest() : verbose_mode_(false), counters(verbose_mode_) {
+    AuthCountersTest() : counters() {
         counters.setStatisticsSession(&statistics_session_);
     }
     ~AuthCountersTest() {
     }
     MockSession statistics_session_;
-    bool verbose_mode_;
     AuthCounters counters;
 };
 
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 254875f..cca4a53 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -27,3 +27,8 @@ bind10: bind10.py
 
 pytest:
 	$(SHELL) tests/bind10_test
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 89301bd..3a36e01 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -30,7 +30,7 @@ export PYTHONPATH
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 34d809a..3d8d57a 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -4,6 +4,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = bind10_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -13,6 +20,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
 	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
 		$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index 2f412ec..cd8bcb3 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -25,3 +25,8 @@ bindctl: bindctl_main.py
 	       -e "s|@@SYSCONFDIR@@|@sysconfdir@|" \
 	       -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl_main.py >$@
 	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
old mode 100644
new mode 100755
index 730ce1e..8f6ba59
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -23,6 +23,14 @@ BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
 PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
 export PYTHONPATH
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	export @ENV_LIBRARY_PATH@
+fi
+
 B10_FROM_SOURCE=@abs_top_srcdir@
 export B10_FROM_SOURCE
 
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index d2bb90f..891d413 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = bindctl_test.py cmdparse_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin  \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/cfgmgr/Makefile.am b/src/bin/cfgmgr/Makefile.am
index fc0ed4a..aee78cf 100644
--- a/src/bin/cfgmgr/Makefile.am
+++ b/src/bin/cfgmgr/Makefile.am
@@ -28,3 +28,8 @@ install-data-local:
 	$(mkinstalldirs) $(DESTDIR)/@localstatedir@/@PACKAGE@
 # TODO: permissions handled later
 
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/plugins/Makefile.am b/src/bin/cfgmgr/plugins/Makefile.am
index 64f9dc3..529a4ed 100644
--- a/src/bin/cfgmgr/plugins/Makefile.am
+++ b/src/bin/cfgmgr/plugins/Makefile.am
@@ -4,3 +4,9 @@ EXTRA_DIST += logging.spec b10logging.py
 
 config_plugindir = @prefix@/share/@PACKAGE@/config_plugins
 config_plugin_DATA = tsig_keys.py tsig_keys.spec
+config_plugin_DATA += b10logging.py logging.spec
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 896dab7..725d391 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index 68666e6..9f80e5d 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = b10-cfgmgr_test.py
 
 EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -13,6 +20,12 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	env TESTDATA_PATH=$(abs_srcdir)/testdata \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
+
+CLEANDIRS = testdata/plugins/__pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index 33f8b76..97a64ff 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -51,3 +51,8 @@ install-data-local:
 	done
 
 endif
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 6a4d7d4..e4ec9d4 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = cmdctl_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
 	CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
 	CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
old mode 100644
new mode 100755
index b7ac19f..95de396
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -21,6 +21,14 @@ export PYTHON_EXEC
 PYTHONPATH=@abs_top_builddir@/src/lib/python
 export PYTHONPATH
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	export @ENV_LIBRARY_PATH@
+fi
+
 BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
 export BIND10_MSGQ_SOCKET_FILE
 
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index a90cab2..3507bfa 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,8 +13,15 @@ EXTRA_DIST += ttl2.db
 EXTRA_DIST += ttlext.db
 EXTRA_DIST += example.db
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # TODO: maybe use TESTS?
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 	echo Running test: correct_test.sh 
-	$(SHELL) $(abs_builddir)/correct_test.sh
+	$(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/correct_test.sh
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index bbeec07..87bb1cf 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,8 +12,15 @@ EXTRA_DIST += keyerror3.db
 EXTRA_DIST += originerr1.db
 EXTRA_DIST += originerr2.db
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # TODO: use TESTS ?
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 	echo Running test: error_test.sh
-	$(SHELL) $(abs_builddir)/error_test.sh
+	$(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/error_test.sh
diff --git a/src/bin/msgq/Makefile.am b/src/bin/msgq/Makefile.am
index 61d4f23..0eebf00 100644
--- a/src/bin/msgq/Makefile.am
+++ b/src/bin/msgq/Makefile.am
@@ -20,3 +20,8 @@ endif
 b10-msgq: msgq.py
 	$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" msgq.py >$@
 	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 0bbb964..50c1e6e 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = msgq_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/resolver/resolverdef.mes b/src/bin/resolver/resolverdef.mes
index bb89cfa..47433a4 100644
--- a/src/bin/resolver/resolverdef.mes
+++ b/src/bin/resolver/resolverdef.mes
@@ -118,7 +118,7 @@ of the body of the message failed due to some non-protocol related reason
 (although the parsing of the header succeeded).  The message parameters give
 a textual description of the problem and the RCODE returned.
 
-% PRINTMSG      print message command, aeguments are: %1
+% PRINTMSG      print message command, arguments are: %1
 This message is logged when a "print_message" command is received over the
 command channel.
 
@@ -169,7 +169,7 @@ resolver gives up trying to resolve a query.  Retry count: the number of times
 the resolver will retry a query to an upstream server if it gets a timeout.
 
 The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
 upstream nameservers.  Even if none of these queries timeout, the total time
 taken to perform all the queries may exceed the client timeout.  When this
 happens, a SERVFAIL is returned to the client, but the resolver continues
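
The client-timeout explanation above describes the one case where a client can see SERVFAIL although no individual upstream query failed. A conceptual sketch of that check (not taken from the resolver source): each upstream round trip may be fast, yet their sum can still pass the client timeout, at which point the client is answered with SERVFAIL while the resolver continues working on the query.

    // Conceptual sketch only, not resolver code.
    #include <chrono>

    bool
    clientDeadlineExceeded(const std::chrono::milliseconds elapsed_total,
                           const std::chrono::milliseconds client_timeout) {
        // Sum of all upstream round trips so far versus the client timeout;
        // exceeding it means the client gets SERVFAIL even though no single
        // upstream query timed out on its own.
        return (elapsed_total > client_timeout);
    }
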
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index e4a4f92..c8b18c9 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -34,3 +34,8 @@ b10-stats: stats.py
 b10-stats-httpd: stats_httpd.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats_httpd.py >$@
 	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
new file mode 100644
index 0000000..d5846ad
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
+<!--
+ - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<stats:stats_data version="1.0"
+  xmlns:stats="$xsd_namespace"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="$xsd_namespace $xsd_url_path">
+  $xml_string
+</stats:stats_data>
diff --git a/src/bin/stats/stats-httpd-xml.tpl.in b/src/bin/stats/stats-httpd-xml.tpl.in
deleted file mode 100644
index d5846ad..0000000
--- a/src/bin/stats/stats-httpd-xml.tpl.in
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
-  xmlns:stats="$xsd_namespace"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="$xsd_namespace $xsd_url_path">
-  $xml_string
-</stats:stats_data>
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
new file mode 100644
index 0000000..6ad1280
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<schema targetNamespace="$xsd_namespace"
+  xmlns="http://www.w3.org/2001/XMLSchema"
+  xmlns:stats="$xsd_namespace">
+  <annotation>
+    <documentation xml:lang="en">XML schema of the statistics
+      data in BIND 10</documentation>
+  </annotation>
+  <element name="stats_data">
+    <annotation>
+      <documentation>A set of statistics data</documentation>
+    </annotation>
+    <complexType>
+      $xsd_string
+      <attribute name="version" type="token" use="optional" default="1.0">
+        <annotation>
+          <documentation>Version number of syntax</documentation>
+        </annotation>
+      </attribute>
+    </complexType>
+  </element>
+</schema>
diff --git a/src/bin/stats/stats-httpd-xsd.tpl.in b/src/bin/stats/stats-httpd-xsd.tpl.in
deleted file mode 100644
index 6ad1280..0000000
--- a/src/bin/stats/stats-httpd-xsd.tpl.in
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
-  xmlns="http://www.w3.org/2001/XMLSchema"
-  xmlns:stats="$xsd_namespace">
-  <annotation>
-    <documentation xml:lang="en">XML schema of the statistics
-      data in BIND 10</documentation>
-  </annotation>
-  <element name="stats_data">
-    <annotation>
-      <documentation>A set of statistics data</documentation>
-    </annotation>
-    <complexType>
-      $xsd_string
-      <attribute name="version" type="token" use="optional" default="1.0">
-        <annotation>
-          <documentation>Version number of syntax</documentation>
-        </annotation>
-      </attribute>
-    </complexType>
-  </element>
-</schema>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
new file mode 100644
index 0000000..01ffdc6
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<xsl:stylesheet version="1.0"
+  xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
+  xmlns:stats="$xsd_namespace">
+  <xsl:output method="html" encoding="UTF-8"
+    doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
+    doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
+  <xsl:template match="/">
+    <html lang="en">
+      <head>
+        <title>BIND 10 Statistics</title>
+        <style type="text/css"><![CDATA[
+table {
+  border: 1px #000000 solid;
+  border-collapse: collapse;
+}
+td, th {
+  padding: 3px 20px;
+  border: 1px #000000 solid;
+}
+td.title {
+  text-decoration:underline;
+}
+]]>
+        </style>
+      </head>
+      <body>
+        <h1>BIND 10 Statistics</h1>
+        <table>
+          <tr>
+            <th>Title</th>
+            <th>Value</th>
+          </tr>
+          <xsl:apply-templates />
+        </table>
+      </body>
+    </html>
+  </xsl:template>
+  $xsl_string
+</xsl:stylesheet>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl.in b/src/bin/stats/stats-httpd-xsl.tpl.in
deleted file mode 100644
index 01ffdc6..0000000
--- a/src/bin/stats/stats-httpd-xsl.tpl.in
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<xsl:stylesheet version="1.0"
-  xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
-  xmlns:stats="$xsd_namespace">
-  <xsl:output method="html" encoding="UTF-8"
-    doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
-    doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
-  <xsl:template match="/">
-    <html lang="en">
-      <head>
-        <title>BIND 10 Statistics</title>
-        <style type="text/css"><![CDATA[
-table {
-  border: 1px #000000 solid;
-  border-collapse: collapse;
-}
-td, th {
-  padding: 3px 20px;
-  border: 1px #000000 solid;
-}
-td.title {
-  text-decoration:underline;
-}
-]]>
-        </style>
-      </head>
-      <body>
-        <h1>BIND 10 Statistics</h1>
-        <table>
-          <tr>
-            <th>Title</th>
-            <th>Value</th>
-          </tr>
-          <xsl:apply-templates />
-        </table>
-      </body>
-    </html>
-  </xsl:template>
-  $xsl_string
-</xsl:stylesheet>
diff --git a/src/bin/stats/stats-httpd.spec b/src/bin/stats/stats-httpd.spec
new file mode 100644
index 0000000..6307135
--- /dev/null
+++ b/src/bin/stats/stats-httpd.spec
@@ -0,0 +1,54 @@
+{
+  "module_spec": {
+    "module_name": "StatsHttpd",
+    "module_description": "Stats HTTP daemon",
+    "config_data": [
+      {
+        "item_name": "listen_on",
+        "item_type": "list",
+        "item_optional": false,
+        "item_default": [
+          {
+            "address": "127.0.0.1",
+            "port": 8000
+          }
+        ],
+        "list_item_spec": {
+          "item_name": "address",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": {},
+          "map_item_spec": [
+            {
+              "item_name": "address",
+              "item_type": "string",
+              "item_optional": true,
+              "item_default": "127.0.0.1",
+              "item_description": "listen-on address for HTTP"
+            },
+            {
+              "item_name": "port",
+              "item_type": "integer",
+              "item_optional": true,
+              "item_default": 8000,
+              "item_description": "listen-on port for HTTP"
+            }
+          ]
+        },
+        "item_description": "http listen-on address and port"
+      }
+    ],
+    "commands": [
+      {
+        "command_name": "status",
+        "command_description": "Status of the stats httpd",
+        "command_args": []
+      },
+      {
+        "command_name": "shutdown",
+        "command_description": "Shut down the stats httpd",
+        "command_args": []
+      }
+    ]
+  }
+}
diff --git a/src/bin/stats/stats-httpd.spec.in b/src/bin/stats/stats-httpd.spec.in
deleted file mode 100644
index 6307135..0000000
--- a/src/bin/stats/stats-httpd.spec.in
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "module_spec": {
-    "module_name": "StatsHttpd",
-    "module_description": "Stats HTTP daemon",
-    "config_data": [
-      {
-        "item_name": "listen_on",
-        "item_type": "list",
-        "item_optional": false,
-        "item_default": [
-          {
-            "address": "127.0.0.1",
-            "port": 8000
-          }
-        ],
-        "list_item_spec": {
-          "item_name": "address",
-          "item_type": "map",
-          "item_optional": false,
-          "item_default": {},
-          "map_item_spec": [
-            {
-              "item_name": "address",
-              "item_type": "string",
-              "item_optional": true,
-              "item_default": "127.0.0.1",
-              "item_description": "listen-on address for HTTP"
-            },
-            {
-              "item_name": "port",
-              "item_type": "integer",
-              "item_optional": true,
-              "item_default": 8000,
-              "item_description": "listen-on port for HTTP"
-            }
-          ]
-        },
-        "item_description": "http listen-on address and port"
-      }
-    ],
-    "commands": [
-      {
-        "command_name": "status",
-        "command_description": "Status of the stats httpd",
-        "command_args": []
-      },
-      {
-        "command_name": "shutdown",
-        "command_description": "Shut down the stats httpd",
-        "command_args": []
-      }
-    ]
-  }
-}
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
new file mode 100644
index 0000000..37e9c1a
--- /dev/null
+++ b/src/bin/stats/stats-schema.spec
@@ -0,0 +1,87 @@
+{
+  "module_spec": {
+    "module_name": "Stats",
+    "module_description": "Statistics data schema",
+    "config_data": [
+      {
+        "item_name": "report_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Report time",
+        "item_description": "A date time when stats module reports",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "bind10.boot_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "bind10.BootTime",
+        "item_description": "A date time when bind10 process starts initially",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "stats.boot_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "stats.BootTime",
+        "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "stats.start_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "stats.StartTime",
+        "item_description": "A date time when the stats module starts collecting data or resetting values last time",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "stats.last_update_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "stats.LastUpdateTime",
+        "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "stats.timestamp",
+        "item_type": "real",
+        "item_optional": false,
+        "item_default": 0.0,
+        "item_title": "stats.Timestamp",
+        "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
+        "item_format": "second"
+      },
+      {
+        "item_name": "stats.lname",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "",
+        "item_title": "stats.LocalName",
+        "item_description": "A localname of stats module given via CC protocol"
+      },
+      {
+        "item_name": "auth.queries.tcp",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "auth.queries.tcp",
+        "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+      },
+      {
+        "item_name": "auth.queries.udp",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "auth.queries.udp",
+        "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+      }
+    ],
+    "commands": []
+  }
+}
diff --git a/src/bin/stats/stats-schema.spec.in b/src/bin/stats/stats-schema.spec.in
deleted file mode 100644
index 37e9c1a..0000000
--- a/src/bin/stats/stats-schema.spec.in
+++ /dev/null
@@ -1,87 +0,0 @@
-{
-  "module_spec": {
-    "module_name": "Stats",
-    "module_description": "Statistics data schema",
-    "config_data": [
-      {
-        "item_name": "report_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "Report time",
-        "item_description": "A date time when stats module reports",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "bind10.boot_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "bind10.BootTime",
-        "item_description": "A date time when bind10 process starts initially",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.boot_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "stats.BootTime",
-        "item_description": "A date time when the stats module starts initially or when the stats module restarts",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.start_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "stats.StartTime",
-        "item_description": "A date time when the stats module starts collecting data or resetting values last time",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.last_update_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "stats.LastUpdateTime",
-        "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.timestamp",
-        "item_type": "real",
-        "item_optional": false,
-        "item_default": 0.0,
-        "item_title": "stats.Timestamp",
-        "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
-        "item_format": "second"
-      },
-      {
-        "item_name": "stats.lname",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "",
-        "item_title": "stats.LocalName",
-        "item_description": "A localname of stats module given via CC protocol"
-      },
-      {
-        "item_name": "auth.queries.tcp",
-        "item_type": "integer",
-        "item_optional": false,
-        "item_default": 0,
-        "item_title": "auth.queries.tcp",
-        "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
-      },
-      {
-        "item_name": "auth.queries.udp",
-        "item_type": "integer",
-        "item_optional": false,
-        "item_default": 0,
-        "item_title": "auth.queries.udp",
-        "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
-      }
-    ],
-    "commands": []
-  }
-}
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
new file mode 100644
index 0000000..25f6b54
--- /dev/null
+++ b/src/bin/stats/stats.spec
@@ -0,0 +1,61 @@
+{
+  "module_spec": {
+    "module_name": "Stats",
+    "module_description": "Stats daemon",
+    "config_data": [],
+    "commands": [
+      {
+        "command_name": "status",
+        "command_description": "identify whether stats module is alive or not",
+        "command_args": []
+      },
+      {
+        "command_name": "show",
+        "command_description": "show the specified/all statistics data",
+        "command_args": [
+          {
+            "item_name": "stats_item_name",
+            "item_type": "string",
+            "item_optional": true,
+            "item_default": ""
+          }
+        ]
+      },
+      {
+        "command_name": "set",
+        "command_description": "set the value of specified name in statistics data",
+        "command_args": [
+          {
+            "item_name": "stats_data",
+            "item_type": "map",
+            "item_optional": false,
+            "item_default": {},
+            "map_item_spec": []
+          }
+        ]
+      },
+      {
+        "command_name": "remove",
+        "command_description": "remove the specified name from statistics data",
+        "command_args": [
+          {
+            "item_name": "stats_item_name",
+            "item_type": "string",
+            "item_optional": false,
+            "item_default": ""
+          }
+        ]
+      },
+      {
+        "command_name": "reset",
+        "command_description": "reset all statistics data to default values except for several constant names",
+        "command_args": []
+      },
+      {
+        "command_name": "shutdown",
+        "command_description": "Shut down the stats module",
+        "command_args": []
+      }
+    ]
+  }
+}
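
The commands above are carried over the command channel in the standard
BIND 10 command format. A minimal sketch using the existing
isc.config.ccsession.create_command() helper; the item values are made-up
examples and sending the command over a CC session is omitted:

    # Sketch only: build "show" and "set" command messages for the Stats module.
    from isc.config.ccsession import create_command

    show_cmd = create_command("show", {"stats_item_name": "stats.boot_time"})
    set_cmd = create_command("set", {"stats_data": {"auth.queries.udp": 123}})
    # Each result is a plain dict such as:
    #   {"command": ["show", {"stats_item_name": "stats.boot_time"}]}
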
diff --git a/src/bin/stats/stats.spec.in b/src/bin/stats/stats.spec.in
deleted file mode 100644
index 25f6b54..0000000
--- a/src/bin/stats/stats.spec.in
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-  "module_spec": {
-    "module_name": "Stats",
-    "module_description": "Stats daemon",
-    "config_data": [],
-    "commands": [
-      {
-        "command_name": "status",
-        "command_description": "identify whether stats module is alive or not",
-        "command_args": []
-      },
-      {
-        "command_name": "show",
-        "command_description": "show the specified/all statistics data",
-        "command_args": [
-          {
-            "item_name": "stats_item_name",
-            "item_type": "string",
-            "item_optional": true,
-            "item_default": ""
-          }
-        ]
-      },
-      {
-        "command_name": "set",
-        "command_description": "set the value of specified name in statistics data",
-        "command_args": [
-          {
-            "item_name": "stats_data",
-            "item_type": "map",
-            "item_optional": false,
-            "item_default": {},
-            "map_item_spec": []
-          }
-        ]
-      },
-      {
-        "command_name": "remove",
-        "command_description": "remove the specified name from statistics data",
-        "command_args": [
-          {
-            "item_name": "stats_item_name",
-            "item_type": "string",
-            "item_optional": false,
-            "item_default": ""
-          }
-        ]
-      },
-      {
-        "command_name": "reset",
-        "command_description": "reset all statistics data to default values except for several constant names",
-        "command_args": []
-      },
-      {
-        "command_name": "shutdown",
-        "command_description": "Shut down the stats module",
-        "command_args": []
-      }
-    ]
-  }
-}
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100644
new mode 100755
index 97e9c78..a6fd066
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -437,8 +437,15 @@ class StatsHttpd:
             (k, v) = (str(k), str(v))
             elem = xml.etree.ElementTree.Element(k)
             elem.text = v
+            # The coding conversion is tricky. xml.etree.ElementTree.tostring()
+            # of Python 3.2 returns bytes (not string) regardless of the coding,
+            # while tostring() of Python 3.1 returns a string.  To support both
+            # cases transparently, we first make sure tostring() returns
+            # bytes by specifying utf-8 and then convert the result to a
+            # plain string (the code below assumes it).
             xml_list.append(
-                xml.etree.ElementTree.tostring(elem))
+                str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
+                    encoding='us-ascii'))
         xml_string = "".join(xml_list)
         self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
             xml_string=xml_string,
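
A minimal standalone sketch of the conversion described in the comment above,
assuming only the standard xml.etree.ElementTree module (the element name and
value are made up):

    import xml.etree.ElementTree

    elem = xml.etree.ElementTree.Element("bind10.boot_time")
    elem.text = "1970-01-01T00:00:00Z"

    # With an explicit encoding, tostring() returns bytes on both Python 3.1
    # and 3.2, so the result can be decoded to a plain str unconditionally.
    raw = xml.etree.ElementTree.tostring(elem, encoding='utf-8')
    # Decoding as us-ascii mirrors the patch; it is safe here because the
    # element name and text contain only ASCII characters.
    fragment = str(raw, encoding='us-ascii')
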
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index 5a13277..8163c7f 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -17,3 +17,8 @@ endif
 	B10_FROM_SOURCE=$(abs_top_srcdir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
index 879e8a8..79263a9 100644
--- a/src/bin/stats/tests/http/Makefile.am
+++ b/src/bin/stats/tests/http/Makefile.am
@@ -1,2 +1,6 @@
 EXTRA_DIST = __init__.py server.py
 CLEANFILES = __init__.pyc server.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
index 059107a..bfad7e3 100644
--- a/src/bin/stats/tests/isc/Makefile.am
+++ b/src/bin/stats/tests/isc/Makefile.am
@@ -1,3 +1,8 @@
 SUBDIRS = cc config util
 EXTRA_DIST = __init__.py
 CLEANFILES = __init__.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
index ccf4dde..67323b5 100644
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ b/src/bin/stats/tests/isc/cc/Makefile.am
@@ -1,2 +1,7 @@
 EXTRA_DIST = __init__.py session.py
 CLEANFILES = __init__.pyc session.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
index 5b0379a..ffbecda 100644
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ b/src/bin/stats/tests/isc/config/Makefile.am
@@ -1,2 +1,7 @@
 EXTRA_DIST = __init__.py ccsession.py
 CLEANFILES = __init__.pyc ccsession.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
index b09fdee..9c74354 100644
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ b/src/bin/stats/tests/isc/util/Makefile.am
@@ -1,2 +1,7 @@
 EXTRA_DIST = __init__.py process.py
 CLEANFILES = __init__.pyc process.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 4340c64..b5bcea2 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = process_rename_test.py
 # .py will be generated by configure, so we don't have to include it
 # in EXTRA_DIST.
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -12,6 +19,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index ee8505e..0af9be6 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,12 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
 
 b10_xfrindir = $(pkgdatadir)
 b10_xfrin_DATA = xfrin.spec
+pyexec_DATA = xfrin_messages.py
 
-CLEANFILES = b10-xfrin xfrin.pyc 
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
 
 man_MANS = b10-xfrin.8
 EXTRA_DIST = $(man_MANS) b10-xfrin.xml
-EXTRA_DIST += xfrin.spec
+EXTRA_DIST += xfrin.spec xfrin_messages.mes
 
 if ENABLE_MAN
 
@@ -20,8 +21,17 @@ b10-xfrin.8: b10-xfrin.xml
 
 endif
 
+# Define rule to build logging source files from message file
+xfrin_messages.py: xfrin_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py
+b10-xfrin: xfrin.py xfrin_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
 	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index d4efbc7..0f485aa 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	$(LIBRARY_PATH_PLACEHOLDER) \
+	env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index a9ca0f2..64e3563 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -29,12 +29,17 @@ from isc.config.ccsession import *
 from isc.notify import notify_out
 import isc.util.process
 import isc.net.parse
+from xfrin_messages import *
+
+isc.log.init("b10-xfrin")
+logger = isc.log.Logger("xfrin")
+
 try:
     from pydnspp import *
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrin process
     # must keep running, so we warn about it and move forward.
-    sys.stderr.write('[b10-xfrin] failed to import DNS module: %s\n' % str(e))
+    logger.error(XFRIN_IMPORT_DNS, str(e))
 
 isc.util.process.rename()
 
@@ -69,9 +74,6 @@ __version__ = 'BIND10'
 XFRIN_OK = 0
 XFRIN_FAIL = 1
 
-def log_error(msg):
-    sys.stderr.write("[b10-xfrin] %s\n" % str(msg))
-
 class XfrinException(Exception):
     pass
 
@@ -150,8 +152,7 @@ class XfrinConnection(asyncore.dispatcher):
             self.connect(self._master_address)
             return True
         except socket.error as e:
-            self.log_msg('Failed to connect:(%s), %s' % (self._master_address,
-                                                            str(e)))
+            logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
             return False
 
     def _create_query(self, query_type):
@@ -264,31 +265,27 @@ class XfrinConnection(asyncore.dispatcher):
                 logstr = 'SOA check for \'%s\' ' % self._zone_name
                 ret =  self._check_soa_serial()
 
-            logstr = 'transfer of \'%s\': AXFR ' % self._zone_name
             if ret == XFRIN_OK:
-                self.log_msg(logstr + 'started')
+                logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
                 self._send_query(RRType.AXFR())
                 isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
                                             self._handle_xfrin_response)
 
-                self.log_msg(logstr + 'succeeded')
+                logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
 
         except XfrinException as e:
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
             #TODO, recover data source.
         except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
         except UserWarning as e:
             # XXX: this is an exception from our C++ library via the
             # Boost.Python binding.  It would be better to have more
             # specific exceptions, but at this moment this is the finest
             # granularity.
-            self.log_msg(e)
-            self.log_msg(logstr + 'failed')
+            logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
             ret = XFRIN_FAIL
         finally:
            self.close()
@@ -395,11 +392,6 @@ class XfrinConnection(asyncore.dispatcher):
         # Overwrite the log function, log nothing
         pass
 
-    def log_msg(self, msg):
-        if self._verbose:
-            sys.stdout.write('[b10-xfrin] %s\n' % str(msg))
-
-
 def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
                   shutdown_event, master_addrinfo, check_soa, verbose,
                   tsig_key):
@@ -481,8 +473,8 @@ class ZoneInfo:
             try:
                 self.master_addr = isc.net.parse.addr_parse(master_addr_str)
             except ValueError:
+                logger.error(XFRIN_BAD_MASTER_ADDR_FORMAT, master_addr_str)
                 errmsg = "bad format for zone's master: " + master_addr_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_master_port(self, master_port_str):
@@ -496,8 +488,8 @@ class ZoneInfo:
             try:
                 self.master_port = isc.net.parse.port_parse(master_port_str)
             except ValueError:
+                logger.error(XFRIN_BAD_MASTER_PORT_FORMAT, master_port_str)
                 errmsg = "bad format for zone's master port: " + master_port_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_zone_class(self, zone_class_str):
@@ -514,8 +506,8 @@ class ZoneInfo:
             try:
                 self.rrclass = RRClass(zone_class_str)
             except InvalidRRClass:
+                logger.error(XFRIN_BAD_ZONE_CLASS, zone_class_str)
                 errmsg = "invalid zone class: " + zone_class_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def set_tsig_key(self, tsig_key_str):
@@ -529,8 +521,8 @@ class ZoneInfo:
             try:
                 self.tsig_key = TSIGKey(tsig_key_str)
             except InvalidParameter as ipe:
+                logger.error(XFRIN_BAD_TSIG_KEY_STRING, tsig_key_str)
                 errmsg = "bad TSIG key string: " + tsig_key_str
-                log_error(errmsg)
                 raise XfrinZoneInfoException(errmsg)
 
     def get_master_addr_info(self):
@@ -556,7 +548,8 @@ class Xfrin:
         self._send_cc_session = isc.cc.Session()
         self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
                                               self.config_handler,
-                                              self.command_handler)
+                                              self.command_handler,
+                                              None, True)
         self._module_cc.start()
         config_data = self._module_cc.get_full_config()
         self.config_handler(config_data)
@@ -635,7 +628,7 @@ class Xfrin:
                 if zone_info is None:
                     # TODO what to do? no info known about zone. defaults?
                     errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
-                    log_error(errmsg)
+                    logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
                     answer = create_answer(1, errmsg)
                 else:
                     master_addr = zone_info.get_master_addr_info()
@@ -670,7 +663,7 @@ class Xfrin:
             else:
                 answer = create_answer(1, 'unknown command: ' + command)
         except XfrinException as err:
-            log_error('error happened for command: %s, %s' % (command, str(err)) )
+            logger.error(XFRIN_COMMAND_ERROR, command, str(err))
             answer = create_answer(1, str(err))
         return answer
 
@@ -762,8 +755,7 @@ class Xfrin:
                 except isc.cc.session.SessionTimeout:
                     pass        # for now we just ignore the failure
             except socket.error as err:
-                log_error("Fail to send message to %s and %s, msgq may has been killed"
-                          % (XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME))
+                logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME)
         else:
             msg = create_command(ZONE_XFRIN_FAILED, param)
             # catch the exception, in case msgq has been killed.
@@ -775,8 +767,7 @@ class Xfrin:
                 except isc.cc.session.SessionTimeout:
                     pass        # for now we just ignore the failure
             except socket.error as err:
-                log_error("Fail to send message to %s, msgq may has been killed"
-                          % ZONE_MANAGER_MODULE_NAME)
+                logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
 
     def startup(self):
         while not self._shutdown_event.is_set():
@@ -844,12 +835,11 @@ def main(xfrin_class, use_signal = True):
         xfrind = xfrin_class(verbose = options.verbose)
         xfrind.startup()
     except KeyboardInterrupt:
-        log_error("exit b10-xfrin")
+        logger.info(XFRIN_STOPPED_BY_KEYBOARD)
     except isc.cc.session.SessionError as e:
-        log_error(str(e))
-        log_error('Error happened! is the command channel daemon running?')
+        logger.error(XFRIN_CC_SESSION_ERROR, str(e))
     except Exception as e:
-        log_error(str(e))
+        logger.error(XFRIN_UNKNOWN_ERROR, str(e))
 
     if xfrind:
         xfrind.shutdown()
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
new file mode 100644
index 0000000..80a0be3
--- /dev/null
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -0,0 +1,91 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+
+% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+
+% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
+The AXFR transfer of the given zone was successfully completed.
+
+% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
+The given master address is not a valid IP address.
+
+% XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1
+The master port as read from the configuration is not a valid port number.
+
+% XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFRIN_BAD_ZONE_CLASS invalid zone class: %1
+The zone class as read from the configuration is not a valid DNS class.
+
+% XFRIN_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon, which xfrin talks to, is not running.
+
+% XFRIN_COMMAND_ERROR error while executing command '%1': %2
+There was an error while the given command was being processed. The
+error is given in the log message.
+
+% XFRIN_CONNECT_MASTER error connecting to master at %1: %2
+There was an error opening a connection to the master. The error is
+shown in the log message.
+
+% XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+
+% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+
+% XFRIN_IMPORT_DNS error importing python DNS module: %1
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+
+% XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+
+% XFRIN_STARTING starting xfrin with command line '%1'
+An informational message, this is output when the xfrin daemon starts up.
+
+% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+
+% XFRIN_UNKNOWN_ERROR unknown error: %1
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
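
Once the message compiler has generated xfrin_messages.py (see the rule added
to src/bin/xfrin/Makefile.am above), the message IDs defined here become
module-level constants. A short sketch of the resulting usage pattern,
mirroring xfrin.py.in; it only runs inside a BIND 10 build where isc.log and
the generated module are importable, and the address and zone name below are
made up:

    import isc.log
    from xfrin_messages import *   # generated constants, e.g. XFRIN_CONNECT_MASTER

    isc.log.init("b10-xfrin")
    logger = isc.log.Logger("xfrin")

    # Placeholders %1, %2 in the .mes text are filled from the extra arguments.
    logger.error(XFRIN_CONNECT_MASTER, "192.0.2.1", "connection refused")
    logger.info(XFRIN_AXFR_TRANSFER_STARTED, "example.com")
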
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index d4f021e..82d7652 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -28,3 +28,8 @@ b10-xfrout: xfrout.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
 	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 11916af..6ca2b42 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+	env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index c0db5c6..adabf48 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -116,8 +116,8 @@ class TestXfroutSession(unittest.TestCase):
 
     def setUp(self):
         self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
-        self.log = isc.log.NSLogger('xfrout', '',  severity = 'critical', log_to_console = False )
-        self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), self.log, TSIGKeyRing())
+        #self.log = isc.log.NSLogger('xfrout', '',  severity = 'critical', log_to_console = False )
+        self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), TSIGKeyRing())
         self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
         self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
 
@@ -520,7 +520,7 @@ class MyUnixSockServer(UnixSockServer):
         self._shutdown_event = threading.Event()
         self._max_transfers_out = 10
         self._cc = MyCCSession()
-        self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
+        #self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
 
 class TestUnixSockServer(unittest.TestCase):
     def setUp(self):
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index f352c74..ac22fe4 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -26,7 +26,7 @@ from isc.datasrc import sqlite3_ds
 from socketserver import *
 import os
 from isc.config.ccsession import *
-from isc.log.log import *
+#from isc.log.log import *
 from isc.cc import SessionError, SessionTimeout
 from isc.notify import notify_out
 import isc.util.process
@@ -88,13 +88,13 @@ def get_rrset_len(rrset):
 
 
 class XfroutSession():
-    def __init__(self, sock_fd, request_data, server, log, tsig_key_ring):
+    def __init__(self, sock_fd, request_data, server, tsig_key_ring):
         # The initializer for the superclass may call functions
         # that need _log to be set, so we set it first
         self._sock_fd = sock_fd
         self._request_data = request_data
         self._server = server
-        self._log = log
+        #self._log = log
         self._tsig_key_ring = tsig_key_ring
         self._tsig_ctx = None
         self._tsig_len = 0
@@ -110,7 +110,8 @@ class XfroutSession():
             self.dns_xfrout_start(self._sock_fd, self._request_data)
             #TODO, avoid catching all exceptions
         except Exception as e:
-            self._log.log_message("error", str(e))
+            #self._log.log_message("error", str(e))
+            pass
 
         os.close(self._sock_fd)
 
@@ -137,7 +138,7 @@ class XfroutSession():
             rcode = self._check_request_tsig(msg, mdata)
 
         except Exception as err:
-            self._log.log_message("error", str(err))
+            #self._log.log_message("error", str(err))
             return Rcode.FORMERR(), None
 
         return rcode, msg
@@ -244,16 +245,17 @@ class XfroutSession():
         zone_name = self._get_query_zone_name(msg)
         rcode_ = self._check_xfrout_available(zone_name)
         if rcode_ != Rcode.NOERROR():
-            self._log.log_message("info", "transfer of '%s/IN' failed: %s",
-                                  zone_name, rcode_.to_text())
+            #self._log.log_message("info", "transfer of '%s/IN' failed: %s",
+            #                      zone_name, rcode_.to_text())
             return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
 
         try:
-            self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
+            #self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
             self._reply_xfrout_query(msg, sock_fd, zone_name)
-            self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
+            #self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
         except Exception as err:
-            self._log.log_message("error", str(err))
+            #self._log.log_message("error", str(err))
+            pass
 
         self._server.decrease_transfers_counter()
         return
@@ -317,7 +319,7 @@ class XfroutSession():
 
         for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
             if  self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
-                self._log.log_message("info", "xfrout process is being shutdown")
+                #self._log.log_message("info", "xfrout process is being shutdown")
                 return
             # TODO: RRType.SOA() ?
             if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
@@ -357,7 +359,7 @@ class XfroutSession():
 class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
     '''The unix domain socket server which accept xfr query sent from auth server.'''
 
-    def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc, log):
+    def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc):
         self._remove_unused_sock_file(sock_file)
         self._sock_file = sock_file
         socketserver_mixin.NoPollMixIn.__init__(self)
@@ -366,7 +368,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         self._transfers_counter = 0
         self._shutdown_event = shutdown_event
         self._write_sock, self._read_sock = socket.socketpair()
-        self._log = log
+        #self._log = log
         self.update_config_data(config_data)
         self._cc = cc
 
@@ -394,7 +396,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         try:
             request, client_address = self.get_request()
         except socket.error:
-            self._log.log_message("error", "Failed to fetch request")
+            #self._log.log_message("error", "Failed to fetch request")
             return
 
         # Check self._shutdown_event to ensure the real shutdown comes.
@@ -408,7 +410,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
                     (rlist, wlist, xlist) = ([], [], [])
                     continue
                 else:
-                    self._log.log_message("error", "Error with select(): %s" %e)
+                    #self._log.log_message("error", "Error with select(): %s" %e)
                     break
 
             # self.server._shutdown_event will be set by now, if it is not a false
@@ -419,8 +421,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             try:
                 self.process_request(request)
             except:
-                self._log.log_message("error", "Exception happened during processing of %s"
-                                      % str(client_address))
+                #self._log.log_message("error", "Exception happened during processing of %s"
+                #                      % str(client_address))
                 break
 
     def _handle_request_noblock(self):
@@ -438,8 +440,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             # This may happen when one xfrout process tries to connect to
             # xfrout unix socket server, to check whether there is another
             # xfrout running.
-            if sock_fd == FD_COMM_ERROR:
-                self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
+            #if sock_fd == FD_COMM_ERROR:
+                #self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
             return
 
         # receive request msg
@@ -456,7 +458,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
 
     def finish_request(self, sock_fd, request_data):
         '''Finish one request by instantiating RequestHandlerClass.'''
-        self.RequestHandlerClass(sock_fd, request_data, self, self._log, self.tsig_key_ring)
+        self.RequestHandlerClass(sock_fd, request_data, self, self.tsig_key_ring)
 
     def _remove_unused_sock_file(self, sock_file):
         '''Try to remove the socket file. If the file is being used
@@ -464,8 +466,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         If it's not a socket file or nobody is listening
         , it will be removed. If it can't be removed, exit from python. '''
         if self._sock_file_in_use(sock_file):
-            self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
-                                 " is being used by another xfrout process\n" % sock_file)
+            #self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
+            #                     " is being used by another xfrout process\n" % sock_file)
             sys.exit(0)
         else:
             if not os.path.exists(sock_file):
@@ -474,7 +476,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             try:
                 os.unlink(sock_file)
             except OSError as err:
-                self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
+                #self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
                 sys.exit(0)
 
     def _sock_file_in_use(self, sock_file):
@@ -495,17 +497,18 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         try:
             os.unlink(self._sock_file)
         except Exception as e:
-            self._log.log_message('error', str(e))
+            #self._log.log_message('error', str(e))
+            pass
 
     def update_config_data(self, new_config):
         '''Apply the new config setting of xfrout module. '''
-        self._log.log_message('info', 'update config data start.')
+        #self._log.log_message('info', 'update config data start.')
         self._lock.acquire()
         self._max_transfers_out = new_config.get('transfers_out')
         self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
-        self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
+        #self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
         self._lock.release()
-        self._log.log_message('info', 'update config data complete.')
+        #self._log.log_message('info', 'update config data complete.')
 
     def set_tsig_key_ring(self, key_list):
         """Set the tsig_key_ring , given a TSIG key string list representation. """
@@ -521,7 +524,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
                 self.tsig_key_ring.add(TSIGKey(key_item))
             except InvalidParameter as ipe:
                 errmsg = "bad TSIG key string: " + str(key_item)
-                self._log.log_message('error', '%s' % errmsg)
+                #self._log.log_message('error', '%s' % errmsg)
 
     def get_db_file(self):
         file, is_default = self._cc.get_remote_config_value("Auth", "database_file")
@@ -553,16 +556,16 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
 class XfroutServer:
     def __init__(self):
         self._unix_socket_server = None
-        self._log = None
+        #self._log = None
         self._listen_sock_file = UNIX_SOCKET_FILE
         self._shutdown_event = threading.Event()
-        self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler)
+        self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler, None, True)
         self._config_data = self._cc.get_full_config()
         self._cc.start()
         self._cc.add_remote_config(AUTH_SPECFILE_LOCATION);
-        self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
-                                self._config_data.get('log_severity'), self._config_data.get('log_versions'),
-                                self._config_data.get('log_max_bytes'), True)
+        #self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
+        #                        self._config_data.get('log_severity'), self._config_data.get('log_versions'),
+        #                        self._config_data.get('log_max_bytes'), True)
         self._start_xfr_query_listener()
         self._start_notifier()
 
@@ -570,13 +573,13 @@ class XfroutServer:
         '''Start a new thread to accept xfr query. '''
         self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
                                                   self._shutdown_event, self._config_data,
-                                                  self._cc, self._log);
+                                                  self._cc)
         listener = threading.Thread(target=self._unix_socket_server.serve_forever)
         listener.start()
 
     def _start_notifier(self):
         datasrc = self._unix_socket_server.get_db_file()
-        self._notifier = notify_out.NotifyOut(datasrc, self._log)
+        self._notifier = notify_out.NotifyOut(datasrc)
         self._notifier.dispatcher()
 
     def send_notify(self, zone_name, zone_class):
@@ -591,8 +594,8 @@ class XfroutServer:
                 continue
             self._config_data[key] = new_config[key]
 
-        if self._log:
-            self._log.update_config(new_config)
+        #if self._log:
+        #    self._log.update_config(new_config)
 
         if self._unix_socket_server:
             self._unix_socket_server.update_config_data(self._config_data)
@@ -621,7 +624,7 @@ class XfroutServer:
 
     def command_handler(self, cmd, args):
         if cmd == "shutdown":
-            self._log.log_message("info", "Received shutdown command.")
+            #self._log.log_message("info", "Received shutdown command.")
             self.shutdown()
             answer = create_answer(0)
 
@@ -629,8 +632,8 @@ class XfroutServer:
             zone_name = args.get('zone_name')
             zone_class = args.get('zone_class')
             if zone_name and zone_class:
-                self._log.log_message("info", "zone '%s/%s': receive notify others command" \
-                                       % (zone_name, zone_class))
+                #self._log.log_message("info", "zone '%s/%s': receive notify others command" \
+                #                       % (zone_name, zone_class))
                 self.send_notify(zone_name, zone_class)
                 answer = create_answer(0)
             else:
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 410279a..8ab5f7a 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -26,3 +26,8 @@ b10-zonemgr: zonemgr.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.py >$@
 	chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 496c1a4..97f9b5e 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = zonemgr_test.py
 EXTRA_DIST = $(PYTESTS)
 CLEANFILES = initdb.file
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -12,6 +19,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index b063289..defaf13 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -1,6 +1,28 @@
-SUBDIRS = tests
+SUBDIRS = . tests
 
-EXTRA_DIST = check.h acl.h
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
 
-# TODO: Once we have some cc file we are able to compile, create the library.
-# For now, we have only header files, not creating empty library.
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+# The core library
+lib_LTLIBRARIES = libacl.la
+libacl_la_SOURCES  = acl.h
+libacl_la_SOURCES += check.h
+libacl_la_SOURCES += ip_check.h ip_check.cc
+libacl_la_SOURCES += logic_check.h
+libacl_la_SOURCES += loader.h loader.cc
+
+libacl_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
+
+# DNS specialized one
+lib_LTLIBRARIES += libdnsacl.la
+
+libdnsacl_la_SOURCES = dns.h dns.cc
+
+libdnsacl_la_LIBADD = libacl.la
+libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
+
+CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
new file mode 100644
index 0000000..16f1bf5
--- /dev/null
+++ b/src/lib/acl/dns.cc
@@ -0,0 +1,34 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "dns.h"
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+Loader&
+getLoader() {
+    static Loader* loader(NULL);
+    if (loader == NULL) {
+        loader = new Loader(REJECT);
+        // TODO: This is the place where we register default check creators
+        // like IP check, etc, once we have them.
+    }
+    return (*loader);
+}
+
+}
+}
+}
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
new file mode 100644
index 0000000..6f36e51
--- /dev/null
+++ b/src/lib/acl/dns.h
@@ -0,0 +1,89 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_DNS_H
+#define ACL_DNS_H
+
+#include "loader.h"
+
+#include <asiolink/io_address.h>
+#include <dns/message.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/**
+ * \brief DNS request to be checked.
+ *
+ * This plays the role of Context of the generic template ACLs (in namespace
+ * isc::acl).
+ *
+ * It is a simple structure holding just a bunch of information. Therefore
+ * the names don't end with an underscore; there are no methods, so they can't
+ * be confused with local variables.
+ *
+ * \todo Do we want a constructor to set this in a shorter manner? So we can
+ *     call the ACLs directly?
+ */
+struct RequestContext {
+    /// \brief The DNS message (payload).
+    isc::dns::ConstMessagePtr message;
+    /// \brief The remote IP address (e.g. the client).
+    asiolink::IOAddress remote_address;
+    /// \brief The local IP address (ours; the interface on which the request was received).
+    asiolink::IOAddress local_address;
+    /// \brief The remote port.
+    uint16_t remote_port;
+    /// \brief The local port.
+    uint16_t local_port;
+    /**
+     * \brief Name of the TSIG key the message is signed with.
+     *
+     * This will be either the name of the TSIG key the message is signed with,
+     * or an empty string if the message is not signed. We could get this
+     * information from the message itself, but because the signature has
+     * already been verified by the time the ACL is checked, passing the name
+     * around is probably cheaper.
+     *
+     * It is expected that messages with invalid signatures are handled before
+     * ACL.
+     */
+    std::string tsig_key_name;
+};
+
+/// \brief DNS based check.
+typedef acl::Check<RequestContext> Check;
+/// \brief DNS based compound check.
+typedef acl::CompoundCheck<RequestContext> CompoundCheck;
+/// \brief DNS based ACL.
+typedef acl::ACL<RequestContext> ACL;
+/// \brief DNS based ACL loader.
+typedef acl::Loader<RequestContext> Loader;
+
+/**
+ * \brief Loader singleton access function.
+ *
+ * This function returns a loader of ACLs. It is expected that applications
+ * will use this function instead of creating their own loaders, because one
+ * loader is enough: it will have the default checks registered, and since it
+ * is the well-known instance, any plugins can register additional checks
+ * with it as well.
+ */
+Loader& getLoader();
+
+}
+}
+}
+
+#endif
diff --git a/src/lib/acl/ip_check.cc b/src/lib/acl/ip_check.cc
new file mode 100644
index 0000000..08c8431
--- /dev/null
+++ b/src/lib/acl/ip_check.cc
@@ -0,0 +1,111 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/lexical_cast.hpp>
+
+#include <acl/ip_check.h>
+
+using namespace std;
+
+// Split the IP Address prefix
+
+namespace isc {
+namespace acl {
+namespace internal {
+
+uint8_t
+createMask(size_t prefixlen) {
+
+    if (prefixlen == 0) {
+        return (0);
+
+    } else if (prefixlen <= 8) {
+
+        // In the following discussion:
+        //
+        // w is the width of the data type in bits.
+        // m is the value of prefixlen, the number of most significant bits we
+        // want to set.
+        // ** is exponentiation (i.e. 2**n is 2 raised to the power of n).
+        //
+        // We note that the value of 2**m - 1 gives a value with the least
+        // significant m bits set.  For a data type of width w, this means that
+        // the most significant (w-m) bits are clear.
+        //
+        // Hence the value 2**(w-m) - 1 gives a result with the least significant
+        // w-m bits set and the most significant m bits clear.  The 1's
+        // complement of this value gives the result we want.
+        //
+        // Final note: at this point in the logic, m is non-zero, so w-m < w.
+        // This means 1<<(w-m) will fit into a variable of width w bits.  In
+        // other words, in the expression below, no term will cause an integer
+        // overflow.
+        return (~((1 << (8 - prefixlen)) - 1));
+    }
+
+    // Mask size is too large. (Note that prefixlen is unsigned, so can't be
+    // negative.)
+    isc_throw(isc::OutOfRange, "prefixlen argument must be between 0 and 8");
+}
+
+pair<string, int>
+splitIPAddress(const string& ipprefix) {
+
+    // Split string into its components - an address and a prefix length.
+    // We initialize by assuming that there is no slash in the string given.
+    string address = ipprefix;
+    string prefixlen = "";
+
+    const size_t slashpos = ipprefix.find('/');
+    if ((ipprefix.size() == 0) || (slashpos == 0) ||
+        (slashpos == (ipprefix.size() - 1))) {
+        // Nothing in prefix, or it starts with or ends with a slash.
+        isc_throw(isc::InvalidParameter, "address prefix of " << ipprefix <<
+                                         " is not valid");
+
+    } else if (slashpos != string::npos) {
+        // There is a slash somewhere in the string, split the string on it.
+        // Don't worry about multiple slashes - if there are some, they will
+        // appear in the prefixlen segment and will be detected when an attempt
+        // is made to convert it to a number.
+        address = ipprefix.substr(0, slashpos);
+        prefixlen = ipprefix.substr(slashpos + 1);
+    }
+
+    // Set the default value for the prefix length.  As the type of the address
+    // is not known at the point this function is called, the maximum
+    // allowable value is also not known.  The value of 0 is reserved for
+    // a "match any address" match.
+    int prefix_size = -1;
+
+    // If there is a prefixlength, attempt to convert it.
+    if (!prefixlen.empty()) {
+        try {
+            prefix_size = boost::lexical_cast<int>(prefixlen);
+            if (prefix_size < 0) {
+                isc_throw(isc::InvalidParameter, "address prefix of " <<
+                          ipprefix << " is not valid");
+            }
+        } catch (boost::bad_lexical_cast&) {
+            isc_throw(isc::InvalidParameter, "prefix length of '" <<
+                      prefixlen << "' is not valid");
+        }
+    }
+
+    return (make_pair(address, prefix_size));
+}
+
+} // namespace internal
+} // namespace acl
+} // namespace isc
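
A purely illustrative Python restatement of the two helpers added above; it is
not part of the change and only mirrors the arithmetic and splitting rules
described in the comments:

    def create_mask(prefixlen):
        # Return a byte with the most significant "prefixlen" bits set.
        if prefixlen == 0:
            return 0
        if prefixlen <= 8:
            # ~(2**(8 - prefixlen) - 1), truncated to 8 bits;
            # e.g. prefixlen = 3 gives 0b11100000.
            return 0xff & ~((1 << (8 - prefixlen)) - 1)
        raise ValueError("prefixlen must be between 0 and 8")

    def split_ip_address(ipprefix):
        # Split "address/len" into (address, length); -1 means "no prefix given".
        address, sep, prefixlen = ipprefix.partition('/')
        if not ipprefix or (sep and (not address or not prefixlen)):
            raise ValueError("address prefix %s is not valid" % ipprefix)
        if not sep:
            return (address, -1)
        length = int(prefixlen)   # raises ValueError for non-numeric input
        if length < 0:
            raise ValueError("prefix length %s is not valid" % prefixlen)
        return (address, length)

    assert create_mask(3) == 0xe0
    assert split_ip_address("192.0.2.0/24") == ("192.0.2.0", 24)
    assert split_ip_address("2001:db8::1") == ("2001:db8::1", -1)
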
diff --git a/src/lib/acl/ip_check.h b/src/lib/acl/ip_check.h
new file mode 100644
index 0000000..5bc70fc
--- /dev/null
+++ b/src/lib/acl/ip_check.h
@@ -0,0 +1,354 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IP_CHECK_H
+#define __IP_CHECK_H
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include <boost/static_assert.hpp>
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sys/socket.h> // for AF_INET/AF_INET6
+#include <netinet/in.h>
+
+#include <acl/check.h>
+#include <exceptions/exceptions.h>
+#include <util/strutil.h>
+
+namespace isc {
+namespace acl {
+
+// Free functions.  These are not supposed to be used outside this module,
+// but are declared public for testing.  To try to conceal them, they are
+// put in an "internal" namespace.
+
+namespace internal {
+
+/// \brief Convert prefix length to mask
+///
+/// Given a prefix length and a data type, return a value of that data type
+/// with the most significant "prefix length" bits set.  For example, if the
+/// data type is a uint8_t and the prefix length is 3, the function would
+/// return a uint8_t holding the binary value 11100000.  This value is used as
+/// a mask in the address checks.
+///
+/// \param prefixlen number of bits to be set in the mask.  This must be
+///        between 0 and 8.
+///
+/// \return uint8_t with the most significant "prefixlen" bits set.
+///
+/// \exception OutOfRange prefixlen is too large for the data type.
+
+uint8_t createMask(size_t prefixlen);
+
+/// \brief Split IP Address Prefix
+///
+/// Splits an IP address prefix (given in the form "xxxxxx/n" or "xxxxx") into
+/// a string representing the IP address and a number giving the length of the
+/// prefix. (In the latter case, the prefix length is taken to be the width
+/// in bits of the data type holding the address.) An exception will be
+/// thrown if the string format is invalid or if the prefix length is invalid.
+///
+/// N.B. This function does NOT check that the address component is a valid IP
+/// address; this is done elsewhere in the address parsing process.
+///
+/// \param ipprefix Address or address prefix.  The string should be passed
+///                 without leading or trailing spaces.
+///
+/// \return Pair of (string, int) holding the address string and the prefix
+///         length.  The second element is -1 if no prefix was given.
+///
+/// \exception InvalidParameter Address prefix not of the expected syntax
+
+std::pair<std::string, int>
+splitIPAddress(const std::string& ipprefix);
+
+} // namespace internal
+
+
+
+/// \brief IP Check
+///
+/// This class performs a match between an IP address prefix specified in an ACL
+/// and a given IP address.  The check works for both IPv4 and IPv6 addresses.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+
+template <typename Context>
+class IPCheck : public Check<Context> {
+private:
+    // Size of uint8_t array needed to hold different address types
+    static const size_t IPV6_SIZE = sizeof(struct in6_addr);
+    static const size_t IPV4_SIZE = sizeof(struct in_addr);
+
+    // Confirm our assumption of relative sizes - this allows us to assume that
+    // an array sized for an IPv6 address can hold an IPv4 address.
+    BOOST_STATIC_ASSERT(sizeof(struct in6_addr) > sizeof(struct in_addr));
+
+public:
+    /// \brief String Constructor
+    ///
+    /// Constructs an IP Check object from an address or address prefix in the
+    /// form "<ip-address>/n".
+    ///
+    /// Also allowed are the special keywords "any4" and "any6", which match
+    /// any IPv4 or IPv6 address.  These must be specified in lowercase.
+    ///
+    /// \param ipprefix IP address prefix in the form "<ip-address>/n"
+    ///        (where the "/n" part is optional and should be valid for the
+    ///        address).  If "n" is specified as zero, the match is for any
+    ///        address in that address family.  The address can also be
+    ///        given as "any4" or "any6".
+    IPCheck(const std::string& ipprefix) : family_(0) {
+
+        // Ensure array elements are correctly initialized with zeroes.
+        std::fill(address_, address_ + IPV6_SIZE, 0);
+        std::fill(mask_, mask_ + IPV6_SIZE, 0);
+
+        // Only deal with the string after we've removed leading and trailing
+        // spaces.
+        const std::string mod_prefix = isc::util::str::trim(ipprefix);
+
+        // Check for special cases first.
+        if (mod_prefix == "any4") {
+            family_ = AF_INET;
+
+        } else if (mod_prefix == "any6") {
+            family_ = AF_INET6;
+
+        } else {
+
+            // General address prefix.  Split into address part and prefix
+            // length.
+            const std::pair<std::string, int> result =
+                internal::splitIPAddress(mod_prefix);
+
+            // Try to convert the address.  If successful, the result is in
+            // network-byte order (most significant components at lower
+            // addresses).
+            int status = inet_pton(AF_INET6, result.first.c_str(), address_);
+            if (status == 1) {
+                // It was an IPv6 address.
+                family_ = AF_INET6;
+            } else {
+                // IPv6 interpretation failed, try IPv4.
+                status = inet_pton(AF_INET, result.first.c_str(), address_);
+                if (status == 1) {
+                    family_ = AF_INET;
+                }
+            }
+
+            // Handle errors.
+            if (status == 0) {
+                isc_throw(isc::InvalidParameter, "address prefix of " <<
+                          ipprefix << " is not valid");
+            } else if (status < 0) {
+                isc_throw(isc::Unexpected, "address conversion of " <<
+                          ipprefix << " failed due to a system error");
+            }
+
+            // All done, so set the mask used in the address comparison.
+            setMask(result.second);
+        }
+    }
+
+    /// \brief Destructor
+    virtual ~IPCheck() {}
+
+    /// \brief The check itself
+    ///
+    /// Matches the passed argument to the condition stored here.  Different
+    /// specialisations must be provided for different argument types, and the
+    /// program will fail to compile if a required specialisation is not
+    /// provided.
+    ///
+    /// It is expected that matches() will extract the address information from
+    /// the Context structure, and use compare() to actually perform the
+    /// comparison.
+    ///
+    /// \param context Information to be matched
+    virtual bool matches(const Context& context) const;
+
+    /// \brief Estimated cost
+    ///
+    /// Assume that the cost of the match is linear and depends on the
+    /// maximum number of comparison operations.
+    ///
+    /// \return Estimated cost of the comparison
+    virtual unsigned cost() const {
+        return ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+    }
+
+    ///@{
+    /// Access methods - mainly for testing
+
+    /// \return Stored IP address
+    std::vector<uint8_t> getAddress() const {
+        const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+        return (std::vector<uint8_t>(address_, address_ + vector_len));
+    }
+
+    /// \return Network mask applied to match
+    std::vector<uint8_t> getMask() const {
+        const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+        return (std::vector<uint8_t>(mask_, mask_ + vector_len));
+    }
+
+    /// \return Prefix length of the match
+    size_t getPrefixlen() const {
+        // Work this out by counting bits in the mask.
+        size_t count = 0;
+        for (size_t i = 0; i < IPV6_SIZE; ++i) {
+            if (mask_[i] == 0xff) {
+                // All bits set in this byte
+                count += 8;
+                continue;
+
+            } else if (mask_[i] != 0) {
+                // Only some bits set in this byte.  Count them.
+                uint8_t byte = mask_[i];
+                for (int j = 0; j < 8; ++j) {
+                    count += byte & 0x01;   // Add one if the bit is set
+                    byte >>= 1;             // Go for next bit
+                }
+            }
+            break;
+        }
+        return (count);
+    }
+
+    /// \return Address family
+    int getFamily() const {
+        return (family_);
+    }
+    ///@}
+
+protected:
+    /// \brief Comparison
+    ///
+    /// This is the actual comparison function that checks the IP address passed
+    /// to this class with the matching information in the class itself.  It is
+    /// expected to be called from matches().
+    ///
+    /// \param testaddr Address (in network byte order) to test against the
+    ///                 check condition in the class.  This is expected to
+    ///                 be IPV6_SIZE or IPV4_SIZE bytes long.
+    /// \param family   Address family of testaddr.
+    ///
+    /// \return true if the address matches, false if it does not.
+    virtual bool compare(const uint8_t* testaddr, int family) const {
+
+        if (family != family_) {
+            // Can't match if the address is of the wrong family
+            return (false);
+        }
+
+        // The simple family check has not ruled out a match, so a full
+        // comparison is needed.  To check that the address given matches the
+        // stored network address and mask, we check the simple condition that:
+        //
+        //     address_given & mask_ == stored_address & mask_
+        //
+        // The result is checked for all bytes for which there are bits set in
+        // the mask.  We stop at the first non-match (or when we run out of bits
+        // in the mask).
+        //
+        // Note that the mask represents a contiguous set of bits.  As such, as
+        // soon as we find a mask byte of zeroes, we have run past the part of
+        // the address where we need to match.
+        //
+        // Note also that when checking an IPv4 address, the constructor has
+        // set all bytes in the mask beyond the first four bytes to zero.
+        // As the loop stops when it encounters a zero mask byte, if the
+        // ACL is for an IPV4 address, the loop will never check more than four
+        // bytes.
+
+        bool match = true;
+        for (int i = 0; match && (i < IPV6_SIZE) && (mask_[i] != 0); ++i) {
+             match = ((testaddr[i] & mask_[i]) == (address_[i] & mask_[i]));
+        }
+        return (match);
+    }
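// A byte-level trace of the comparison above (illustrative only, not part of
// this patch), assuming the check was constructed as "192.0.2.0/24" and the
// test address is 192.0.2.77:
//
//   address_ = {0xc0, 0x00, 0x02, 0x00, ...}  mask_ = {0xff, 0xff, 0xff, 0x00, ...}
//   testaddr = {0xc0, 0x00, 0x02, 0x4d, ...}
//
//   byte 0: (0xc0 & 0xff) == (0xc0 & 0xff)  -> still matching
//   byte 1: (0x00 & 0xff) == (0x00 & 0xff)  -> still matching
//   byte 2: (0x02 & 0xff) == (0x02 & 0xff)  -> still matching
//   byte 3: mask_[3] is 0x00, so the loop stops and the result is a match.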
+
+private:
+    /// \brief Set Mask
+    ///
+    /// Sets up the mask from the prefix length.  This involves setting
+    /// an individual mask in each byte of the mask array.
+    ///
+    /// The actual allowed value of the prefix length depends on the address
+    /// family.
+    ///
+    /// \param requested Requested prefix length size.  If negative, the
+    ///        maximum for the address family is assumed.  (A negative value
+    ///        will arise if the string constructor was used and no mask size
+    ///        was given.)
+    void setMask(int requested) {
+
+        // Set the maximum number of bits allowed in the mask, and request
+        // that number of bits if no prefix length was given in the constructor.
+        const int maxmask = 8 * ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+        if (requested < 0) {
+            requested = maxmask;
+        }
+
+        // Validate that the mask is valid.
+        if (requested <= maxmask) {
+
+            // Loop, setting the bits in the set of mask bytes until all the
+            // specified bits have been used up.  As both IPv4 and IPv6
+            // addresses are stored in network-byte order, this works in
+            // both cases.
+            size_t bits_left = requested;   // Bits remaining to set
+            int i = -1;
+            while (bits_left > 0) {
+                if (bits_left >= 8) {
+                    mask_[++i] = ~0;  // All bits set
+                    bits_left -= 8;
+
+                } else if (bits_left > 0) {
+                    mask_[++i] = internal::createMask(bits_left);
+                    bits_left = 0;
+                }
+            }
+        } else {
+            isc_throw(isc::OutOfRange,
+                      "mask size of " << requested << " is invalid " <<
+                      "for the given address family");
+        }
+    }
+
+    // Member variables.
+    uint8_t address_[IPV6_SIZE];  ///< Address in binary form
+    uint8_t mask_[IPV6_SIZE];     ///< Address mask
+    int     family_;              ///< Address family
+};
+
+// Some compilers seem to need this to be explicitly defined outside the class
+template <typename Context>
+const size_t IPCheck<Context>::IPV6_SIZE;
+
+template <typename Context>
+const size_t IPCheck<Context>::IPV4_SIZE;
+
+} // namespace acl
+} // namespace isc
+
+#endif // __IP_CHECK_H
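The class above cannot be used until a matches() specialisation is supplied, so a minimal sketch of that pattern follows. This is illustrative only and not part of this patch: the RequestContext type and its members are assumptions made for the example, and the IPCheck<RequestContext>::matches() specialisation shown is required for the sketch to link. The unit tests added later in this commit use the same pattern with their GeneralAddress context.

// Illustrative sketch only - not part of this patch.
#include <acl/ip_check.h>
#include <vector>
#include <stdint.h>
#include <sys/socket.h>

// Hypothetical context carrying the client address in network byte order.
struct RequestContext {
    std::vector<uint8_t> client_addr;   // 4 bytes (IPv4) or 16 (IPv6)
    int family;                         // AF_INET or AF_INET6
};

namespace isc {
namespace acl {
// The required specialisation: extract the address from the context and
// delegate to the protected compare() helper.
template <>
bool IPCheck<RequestContext>::matches(const RequestContext& ctx) const {
    return (compare(&ctx.client_addr[0], ctx.family));
}
} // namespace acl
} // namespace isc

bool isAllowedExample(const RequestContext& ctx) {
    // "any4" and "any6" are also accepted by the string constructor.
    isc::acl::IPCheck<RequestContext> check("192.0.2.0/24");
    return (check.matches(ctx));
}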
diff --git a/src/lib/acl/loader.cc b/src/lib/acl/loader.cc
new file mode 100644
index 0000000..8ca7e28
--- /dev/null
+++ b/src/lib/acl/loader.cc
@@ -0,0 +1,46 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "loader.h"
+
+using namespace std;
+
+namespace isc {
+namespace acl {
+
+BasicAction defaultActionLoader(data::ConstElementPtr actionEl) {
+    try {
+        const string action(actionEl->stringValue());
+        if (action == "ACCEPT") {
+            return (ACCEPT);
+        } else if (action == "REJECT") {
+            return (REJECT);
+        } else if (action == "DROP") {
+            return (DROP);
+        } else {
+            throw LoaderError(__FILE__, __LINE__,
+                              string("Unknown action '" + action + "'").
+                                  c_str(),
+                              actionEl);
+        }
+    }
+    catch (const data::TypeError&) {
+        throw LoaderError(__FILE__, __LINE__,
+                          "Invalid element type for action, must be string",
+                          actionEl);
+    }
+}
+
+}
+}
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
new file mode 100644
index 0000000..c3400cb
--- /dev/null
+++ b/src/lib/acl/loader.h
@@ -0,0 +1,448 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOADER_H
+#define ACL_LOADER_H
+
+#include "acl.h"
+#include <cc/data.h>
+#include <boost/function.hpp>
+#include <boost/shared_ptr.hpp>
+#include <map>
+
+namespace isc {
+namespace acl {
+
+class AnyOfSpec;
+class AllOfSpec;
+template<typename Mode, typename Context> class LogicOperator;
+
+/**
+ * \brief Exception for bad ACL specifications.
+ *
+ * This will be thrown by the Loader if the ACL description is malformed
+ * in some way.
+ *
+ * It can also hold an optional JSON element indicating where the error was
+ * detected, so it can be examined.
+ *
+ * Checks may subclass this exception for similar errors if they see fit.
+ */
+class LoaderError : public BadValue {
+private:
+    const data::ConstElementPtr element_;
+public:
+    /**
+     * \brief Constructor.
+     *
+     * Should be used with isc_throw if the fourth argument isn't used.
+     *
+     * \param file The file where the throw happened.
+     * \param line Similar as file, just for the line number.
+     * \param what Human readable description of what happened.
+     * \param element This might be passed to hold the JSON element where
+     *     the error was detected.
+     */
+    LoaderError(const char* file, size_t line, const char* what,
+                data::ConstElementPtr element = data::ConstElementPtr()) :
+        BadValue(file, line, what),
+        element_(element)
+    {}
+    ~LoaderError() throw() {}
+    /**
+     * \brief Get the element.
+     *
+     * This returns the element where the error was detected. Note that it
+     * might be NULL in some situations.
+     */
+    const data::ConstElementPtr& element() const {
+        return (element_);
+    }
+};
+
+/**
+ * \brief Loader of the default actions of ACLs.
+ *
+ * Declared outside the Loader class, as this one does not need to be
+ * templated. This will throw LoaderError if the parameter isn't a string
+ * or if it doesn't contain one of the accepted values.
+ *
+ * \param action The JSON representation of the action. It must be a string
+ *     and contain one of "ACCEPT", "REJECT" or "DROP".
+ * \note We could define different names or add aliases if needed.
+ */
+BasicAction defaultActionLoader(data::ConstElementPtr action);
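// A small usage sketch of the loader above (illustrative only, not part of
// this patch; it assumes isc::data::Element::fromJSON accepts a bare JSON
// string, as it is used elsewhere in this commit for lists and maps).
#include <acl/loader.h>
#include <cc/data.h>

void defaultActionExample() {
    using namespace isc::acl;
    const BasicAction a =
        defaultActionLoader(isc::data::Element::fromJSON("\"DROP\""));
    // a == DROP; "ACCEPT" and "REJECT" work the same way.  A non-string
    // element, or a string other than these three, makes defaultActionLoader
    // throw LoaderError.
}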
+
+/**
+ * \brief Loader of ACLs.
+ *
+ * The goal of this class is to convert JSON description of an ACL to object
+ * of the ACL class (including the checks inside it).
+ *
+ * The class can be used to load the checks only. This is supposed to be used
+ * by compound checks to create the subexpressions.
+ *
+ * To allow any kind of checks to exist in the application, creators are
+ * registered for the names of the checks.
+ *
+ * An ACL definition looks like this:
+ * \verbatim
+ * [
+ *   {
+ *      "action": "ACCEPT",
+ *      "match-type": <parameter>
+ *   },
+ *   {
+ *      "action": "REJECT",
+ *      "match-type": <parameter>
+ *      "another-match-type": [<parameter1>, <parameter2>]
+*    },
+*    {
+*       "action": "DROP"
+*    }
+ * ]
+ * \endverbatim
+ *
+ * This is a list of elements. Each element must have an "action"
+ * entry/keyword. That one specifies which action is returned if this
+ * element matches (the value of the key is passed to the action loader;
+ * see the constructor). It may be any piece of JSON which the action
+ * loader expects.
+ *
+ * The rest of the element consists of matches. The left side is the name of
+ * the match type (for example a match for the source IP address or a match
+ * for the message size). The <parameter> is whatever is needed to describe
+ * the match and depends on the match type; the loader passes it verbatim to
+ * the creator of that match type.
+ *
+ * There may be multiple match types in a single element. In that case, all
+ * of the matches must match for the element to take action (so, in the second
+ * element, both "match-type" and "another-match-type" must be satisfied).
+ * If there's no match in the element, the action is taken/returned without
+ * conditions, every time (this makes sense as the last entry, as the ACL will
+ * never get past it).
+ *
+ * The second entry shows another thing - if the value for some match is a
+ * list and the match itself is not expecting a list, it is taken as an "or":
+ * at least one of the choices in the list must match. So, for the second
+ * entry, both "match-type" and "another-match-type" must be satisfied, but
+ * the latter is satisfied by either parameter1 or parameter2.
+ */
+template<typename Context, typename Action = BasicAction> class Loader {
+public:
+    /**
+     * \brief Constructor.
+     *
+     * \param defaultAction The default action for created ACLs.
+     * \param actionLoader The loader which will be used to convert actions
+     *     from their JSON representation. The default value is suitable for
+     *     the BasicAction enum. If you did not specify the second
+     *     template argument, you don't need to specify this loader.
+     */
+    Loader(const Action& defaultAction,
+           const boost::function1<Action, data::ConstElementPtr>
+               &actionLoader = &defaultActionLoader) :
+        default_action_(defaultAction),
+        action_loader_(actionLoader)
+    {}
+    /**
+     * \brief Creator of the checks.
+     *
+     * This can be registered within the Loader and will be used to create the
+     * checks. It is expected that multiple creators (for multiple types; one
+     * creator can even handle multiple names) will be created and registered
+     * to support the range of things we could check. This allows for
+     * customizing/extending the loader.
+     */
+    class CheckCreator {
+    public:
+        /**
+         * \brief List of names supported by this creator.
+         *
+         * List of all names for which this creator is able to create the
+         * checks. There can be multiple names, to support both aliases
+         * to the same checks and creators capable of creating multiple
+         * types of checks.
+         */
+        virtual std::vector<std::string> names() const = 0;
+        /**
+         * \brief Creates the check.
+         *
+         * This function does the actual creation. It is passed all the
+         * relevant data and is supposed to return a shared pointer to the
+         * check.
+         *
+         * It is expected to throw the LoaderError exception when the
+         * definition is invalid.
+         *
+         * \param name The type name of the check. If the creator creates
+         *     only one type of check, it can safely ignore this parameter.
+         * \param definition The part of JSON describing the parameters of
+         *     the check. As there's no way for the loader to know what the
+         *     parameters might look like, they are not checked in any way.
+         *     Therefore it's up to the creator (or the check being created)
+         *     to validate the data and throw if it is bad.
+         * \param loader The current loader calling this creator. This can be
+         *     used to load subexpressions in the case of a compound check.
+         */
+        virtual boost::shared_ptr<Check<Context> > create(
+            const std::string& name, data::ConstElementPtr definition,
+            const Loader<Context, Action>& loader) = 0;
+        /**
+         * \brief Is list or-abbreviation allowed?
+         *
+         * If this returns true and the parameter (i.e. the value we check
+         * against, the one that is passed as the second parameter of create)
+         * is a list, the loader will call the create method with each element
+         * of the list and aggregate all the results in an OR compound check.
+         * If it is false, the parameter is passed verbatim no matter whether
+         * it is a list or not. For example, the IP check will have this as
+         * true (so multiple IP addresses can be passed as options), but the
+         * AND operator will return false and handle the list of
+         * subexpressions itself.
+         *
+         * The rationale behind this is that it is common to specify a list of
+         * something that matches (e.g. a list of IP addresses).
+         */
+        virtual bool allowListAbbreviation() const {
+            return (true);
+        }
+    };
+    /**
+     * \brief Register another check creator.
+     *
+     * Adds a creator to the list of known ones. The creator's list of names
+     * must be disjoint with the names already known to the loader or the
+     * LoaderError exception is thrown. In that case, the creator is not
+     * registered under any of the names. In case of other exceptions, like
+     * bad_alloc, only weak exception safety is guaranteed.
+     *
+     * \param creator Shared pointer to the creator.
+     * \note We don't support deregistration yet, but it is expected it will
+     *     be needed in future, when we have some kind of plugins. These
+     *     plugins might want to unload, in which case they would need to
+     *     deregister their creators. It is expected they would pass the same
+     *     pointer to such method as they pass here.
+     */
+    void registerCreator(boost::shared_ptr<CheckCreator> creator) {
+        // First check we can insert all the names
+        typedef std::vector<std::string> Strings;
+        const Strings names(creator->names());
+        for (Strings::const_iterator i(names.begin()); i != names.end();
+             ++i) {
+            if (creators_.find(*i) != creators_.end()) {
+                isc_throw(LoaderError, "The loader already contains creator "
+                          "named " << *i);
+            }
+        }
+        // Now insert them
+        for (Strings::const_iterator i(names.begin()); i != names.end();
+             ++i) {
+            creators_[*i] = creator;
+        }
+    }
+    /**
+     * \brief Load a check.
+     *
+     * This parses a check dict (block, the one element of ACL) and calls a
+     * creator (or creators, if more than one check is found inside) for it. It
+     * ignores the "action" key, as it is a reserved keyword used to specify
+     * actions inside the ACL.
+     *
+     * This may throw LoaderError if it is not a dict or if some of the type
+     * names are not known (no creator is registered for them). The
+     * exceptions from creators aren't caught.
+     *
+     * \param description The JSON description of the check.
+     */
+    boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
+                                                 description) const
+    {
+        // Get the description as a map
+        typedef std::map<std::string, data::ConstElementPtr> Map;
+        Map map;
+        try {
+            map = description->mapValue();
+        }
+        catch (const data::TypeError&) {
+            isc_throw_1(LoaderError, "Check description is not a map",
+                        description);
+        }
+        // Call the internal part with extracted map
+        return (loadCheck(description, map));
+    }
+    /**
+     * \brief Load an ACL.
+     *
+     * This parses an ACL list, creates the checks and actions of each element
+     * and returns it. It may throw LoaderError if it isn't a list or the
+     * "action" key is missing in some element. Also, exceptions from
+     * loadCheck (and therefore from whatever creator is used) and from the
+     * actionLoader passed to the constructor are not caught.
+     *
+     * \param description The JSON list of ACL.
+     */
+    boost::shared_ptr<ACL<Context, Action> > load(const data::ConstElementPtr&
+                                                  description) const
+    {
+        // We first check it's a list, so we can use the list reference
+        // (the list may be huge)
+        if (description->getType() != data::Element::list) {
+            isc_throw_1(LoaderError, "ACL not a list", description);
+        }
+        // First create an empty ACL
+        const List &list(description->listValue());
+        boost::shared_ptr<ACL<Context, Action> > result(
+            new ACL<Context, Action>(default_action_));
+        // Run through the list of elements
+        for (List::const_iterator i(list.begin()); i != list.end(); ++i) {
+            Map map;
+            try {
+                map = (*i)->mapValue();
+            }
+            catch (const data::TypeError&) {
+                isc_throw_1(LoaderError, "ACL element not a map", *i);
+            }
+            // Create an action for the element
+            const Map::const_iterator action(map.find("action"));
+            if (action == map.end()) {
+                isc_throw_1(LoaderError, "No action in ACL element", *i);
+            }
+            const Action acValue(action_loader_(action->second));
+            // Now create the check if there's one
+            if (map.size() >= 2) { // One is the action, another one the check
+                result->append(loadCheck(*i, map), acValue);
+            } else {
+                // In case there's no check, this matches every time. We
+                // simulate it by our own private "True" check.
+                result->append(boost::shared_ptr<Check<Context> >(new True()),
+                               acValue);
+            }
+        }
+        return (result);
+    }
+private:
+    // Some type aliases to save typing
+    typedef std::map<std::string, boost::shared_ptr<CheckCreator> > Creators;
+    typedef std::map<std::string, data::ConstElementPtr> Map;
+    typedef std::vector<data::ConstElementPtr> List;
+    // Private members
+    Creators creators_;
+    const Action default_action_;
+    const boost::function1<Action, data::ConstElementPtr> action_loader_;
+    /**
+     * \brief Internal version of loadCheck.
+     *
+     * This is the internal part, shared between load and loadCheck.
+     * \param description The bit of JSON (used in exceptions).
+     * \param map The extracted map describing the check. This method
+     *     modifies the map.
+     */
+    boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
+                                                 description, Map& map) const
+    {
+        // Remove the action keyword
+        map.erase("action");
+        // Now, do we have any definition? Or is it an abbreviation?
+        switch (map.size()) {
+            case 0:
+                isc_throw_1(LoaderError, "Check description is empty",
+                            description);
+            case 1: {
+                // Get the first and only item
+                const Map::const_iterator checkDesc(map.begin());
+                const std::string& name(checkDesc->first);
+                const typename Creators::const_iterator
+                    creatorIt(creators_.find(name));
+                if (creatorIt == creators_.end()) {
+                    isc_throw_1(LoaderError, "No creator for ACL check " <<
+                                name, description);
+                }
+                if (creatorIt->second->allowListAbbreviation() &&
+                    checkDesc->second->getType() == data::Element::list) {
+                    // Or-abbreviated form - create an OR and put everything
+                    // inside.
+                    const std::vector<data::ConstElementPtr>&
+                        params(checkDesc->second->listValue());
+                    boost::shared_ptr<LogicOperator<AnyOfSpec, Context> >
+                        oper(new LogicOperator<AnyOfSpec, Context>);
+                    for (std::vector<data::ConstElementPtr>::const_iterator
+                             i(params.begin());
+                         i != params.end(); ++i) {
+                        oper->addSubexpression(
+                            creatorIt->second->create(name, *i, *this));
+                    }
+                    return (oper);
+                }
+                // Create the check and return it
+                return (creatorIt->second->create(name, checkDesc->second,
+                                                  *this));
+            }
+            default: {
+                // This is the AND-abbreviated form. We need to create an
+                // AND (or "ALL") operator, loop through the whole map and
+                // fill it in. We do a small trick - we create a bunch of
+                // single-item maps, call this loader recursively (therefore
+                // it will get into the "case 1" branch, where the actual
+                // loading happens) and use the results to fill the operator.
+                //
+                // We keep the description the same, there's nothing we could
+                // take out (we could create a new one, but that would be
+                // confusing, as it is used for error messages only).
+                boost::shared_ptr<LogicOperator<AllOfSpec, Context> >
+                    oper(new LogicOperator<AllOfSpec, Context>);
+                for (Map::const_iterator i(map.begin()); i != map.end(); ++i) {
+                    Map singleSubexpr;
+                    singleSubexpr.insert(*i);
+                    oper->addSubexpression(loadCheck(description,
+                                                     singleSubexpr));
+                }
+                return (oper);
+            }
+        }
+    }
+    /**
+     * \brief Check that always matches.
+     *
+     * This one is used internally for ACL elements without condition. We may
+     * want to make this publicly accessible at some point, but for now,
+     * there's no need.
+     */
+    class True : public Check<Context> {
+    public:
+        virtual bool matches(const Context&) const { return (true); }
+        virtual unsigned cost() const { return (1); }
+        // We don't write "true" here, as this one was created using empty
+        // input
+        virtual std::string toText() const { return ""; }
+    };
+};
+
+}
+}
+
+/*
+ * This include at the end of the file is unusual. But we need to include it,
+ * we use template classes from there. However, they need to be present only
+ * at instantiation of our class, which will happen below this header.
+ *
+ * The problem is, the header uses us as well, therefore there's a circular
+ * dependency. If we loaded it at the beginning and someone loaded us first,
+ * the logic_check header wouldn't have our definitions. This way, no matter
+ * in which order they are loaded, the definitions from this header will be
+ * above the ones from logic_check.
+ */
+#include "logic_check.h"
+
+#endif
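Pulling the pieces above together, here is a minimal usage sketch. It is illustrative only and not part of this patch: MyContext, the "ip" keyword and IPCreator are assumptions made for the example (the unit tests in this commit register a similar NamedCreator), and an IPCheck<MyContext>::matches() specialisation is assumed to exist, as sketched after ip_check.h above.

// Illustrative sketch only - not part of this patch.
#include <acl/loader.h>
#include <acl/ip_check.h>
#include <cc/data.h>
#include <boost/shared_ptr.hpp>
#include <string>
#include <vector>

struct MyContext {};   // hypothetical application context

// Hypothetical creator wiring the "ip" keyword to IPCheck<MyContext>.
class IPCreator : public isc::acl::Loader<MyContext>::CheckCreator {
public:
    virtual std::vector<std::string> names() const {
        return (std::vector<std::string>(1, "ip"));
    }
    virtual boost::shared_ptr<isc::acl::Check<MyContext> > create(
        const std::string&, isc::data::ConstElementPtr definition,
        const isc::acl::Loader<MyContext>&)
    {
        return (boost::shared_ptr<isc::acl::Check<MyContext> >(
            new isc::acl::IPCheck<MyContext>(definition->stringValue())));
    }
};

void loaderExample() {
    using namespace isc::acl;
    using isc::data::Element;

    Loader<MyContext> loader(REJECT);   // default action if nothing matches
    loader.registerCreator(
        boost::shared_ptr<Loader<MyContext>::CheckCreator>(new IPCreator()));
    boost::shared_ptr<ACL<MyContext> > acl = loader.load(Element::fromJSON(
        "[{\"action\": \"ACCEPT\", \"ip\": \"192.0.2.0/24\"},"
        " {\"action\": \"DROP\"}]"));
    // acl->execute(context) should now return ACCEPT for clients in
    // 192.0.2.0/24 and DROP for everyone else.
}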
diff --git a/src/lib/acl/logic_check.h b/src/lib/acl/logic_check.h
new file mode 100644
index 0000000..6e1c567
--- /dev/null
+++ b/src/lib/acl/logic_check.h
@@ -0,0 +1,206 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOGIC_CHECK_H
+#define ACL_LOGIC_CHECK_H
+
+#include "check.h"
+#include "loader.h"
+
+namespace isc {
+namespace acl {
+
+/// \brief Constants for the AnyOf implementation
+class AnyOfSpec {
+public:
+    static bool start() { return (false); }
+    static bool terminate(const bool another) {
+        return (another);
+    }
+};
+
+/// \brief Constants for the AllOf implementation
+class AllOfSpec {
+public:
+    static bool start() { return (true); }
+    static bool terminate(const bool another) {
+        return (!another);
+    }
+};
+
+/**
+ * \brief Logic operators
+ *
+ * This class implements the AllOf and AnyOf compound checks. As their
+ * behaviour is almost the same, the same template class is used. Which
+ * one it is depends on the Mode template parameter. The Mode should be
+ * one of AnyOfSpec or AllOfSpec, which provide some commands for the
+ * internal implementation. It would be nice to provide typedefs for
+ * them, but it is impossible to do so, as we have the Context template
+ * parameter as well and C++ doesn't like templated typedefs.
+ *
+ * The object holds several subexpressions and returns true if all
+ * of the subexpressions return true (in case of the AllOfSpec Mode) or
+ * at least one of them returns true (in case of the AnyOfSpec Mode). If
+ * some subexpression guarantees the result (e.g. one returns false
+ * in the case of AllOfSpec), the rest are not tried, for performance
+ * reasons.
+ */
+template<typename Mode, typename Context>
+class LogicOperator : public CompoundCheck<Context> {
+public:
+    /**
+     * \brief Add another subexpression.
+     *
+     * This adds another subexpression to the list of checked expressions.
+     * This is usually done shortly after the creation, before using the
+     * check for matches.
+     *
+     * Currently there's no way to place the expression into an arbitrary
+     * position or to remove it. It might turn out that this is needed in
+     * future for optimisation, or it might even turn out we need shared
+     * pointers for it.
+     *
+     * \param expr The new expression to put inside.
+     */
+    void addSubexpression(const boost::shared_ptr<Check<Context> >& expr) {
+        checks_.push_back(expr);
+    }
+    /**
+     * \brief The current list of subexpressions.
+     */
+    virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+        typename CompoundCheck<Context>::Checks result;
+        for (typename Checks::const_iterator i(checks_.begin());
+             i != checks_.end(); ++i) {
+            result.push_back(i->get());
+        }
+        return (result);
+    }
+    /**
+     * \brief The match of the check.
+     *
+     * Runs the subexpressions, one by one, and then decides based on that
+     * what to return.
+     */
+    virtual bool matches(const Context& context) const {
+        /*
+         * This might look slightly complicated. However, it is just a
+         * generalized version of multi-and or multi-or. The usual
+         * implementation of multi-and starts with true; if a false operand
+         * is found, the result becomes false forever and false is
+         * returned. It is exactly the other way around with or.
+         *
+         * So, if we ever find an operand whose value is the opposite of the
+         * starting value (false in the case of and, true in the case of or),
+         * we can just stop and return that value right away. If no such
+         * expression is met, we get to the end and return the default.
+         */
+        for (typename Checks::const_iterator i(checks_.begin());
+             i != checks_.end(); ++i) {
+            if (Mode::terminate((*i)->matches(context))) {
+                return (!Mode::start());
+            }
+        }
+        return (Mode::start());
+    }
+private:
+    /// \brief List of subexpressions
+    typedef typename std::vector<boost::shared_ptr<Check<Context> > > Checks;
+    Checks checks_;
+};
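// A sketch of what the Mode hooks reduce to (illustrative only, not part of
// this patch): with AllOfSpec, start() is true and terminate(x) is !x, so the
// matches() loop above returns false as soon as a subexpression is false and
// true otherwise - a short-circuiting AND.  AnyOfSpec is the mirror image,
// giving a short-circuiting OR.  Written out over a plain vector of
// already-evaluated results:
#include <vector>
#include <acl/logic_check.h>

bool allOfExample(const std::vector<bool>& results) {
    using isc::acl::AllOfSpec;
    for (std::vector<bool>::const_iterator i = results.begin();
         i != results.end(); ++i) {
        if (AllOfSpec::terminate(*i)) {      // i.e. *i is false
            return (!AllOfSpec::start());    // false: an operand failed
        }
    }
    return (AllOfSpec::start());             // true: nothing failed
}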
+
+/**
+ * \brief Creator for the LogicOperator compound check.
+ *
+ * This class can load the ANY and ALL operators from JSON. They expect
+ * a list of subexpressions as a parameter, eg. like this:
+ *
+ * \verbatim
+ * {"ANY": [
+ *    {"ip": "1.2.3.4"},
+ *    {"ip": "5.6.7.8"}
+ * ]}
+ * \endverbatim
+ *
+ * It uses the loader to load the subexpressions, therefore whatever is
+ * supported there is supported here as well.
+ *
+ * The Mode template parameter has the same meaning as with LogicOperator,
+ * it is used to know which operators to create.
+ */
+template<typename Mode, typename Context, typename Action = BasicAction>
+class LogicCreator : public Loader<Context, Action>::CheckCreator {
+public:
+    /**
+     * \brief Constructor.
+     *
+     * \param name The name for which the creator will work. In practice,
+     *     it will usually be ANY or ALL (depending on the mode), but
+     *     anything else can be used as well.
+     */
+    LogicCreator(const std::string& name) :
+        name_(name)
+    {}
+    /// \brief Returns vector containing the name.
+    virtual std::vector<std::string> names() const {
+        std::vector<std::string> result;
+        result.push_back(name_);
+        return (result);
+    }
+    /**
+     * \brief Converts a JSON description into the logic operator.
+     *
+     * This is the place where the actual loading happens. It creates
+     * the logic operator and calls the loader on each of the list
+     * elements, placing the result into the logic operator.
+     *
+     * The first parameter is ignored and is there only to match the
+     * interface.
+     *
+     * \param definition The JSON definition of the subexpressions. This must
+     *     be a list (if it isn't, a LoaderError is thrown) and the elements
+     *     must be loadable by the loader (the exceptions from it are not
+     *     caught).
+     * \param loader The loader to use for loading of subexpressions.
+     */
+    virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+                                                      data::ConstElementPtr
+                                                      definition,
+                                                      const Loader<Context,
+                                                      Action>& loader)
+    {
+        std::vector<data::ConstElementPtr> subexprs;
+        try {
+            subexprs = definition->listValue();
+        }
+        catch (const data::TypeError&) {
+            isc_throw_1(LoaderError, "Logic operator takes list", definition);
+        }
+        boost::shared_ptr<LogicOperator<Mode, Context> >
+            result(new LogicOperator<Mode, Context>);
+        for (std::vector<data::ConstElementPtr>::const_iterator
+                 i(subexprs.begin());
+             i != subexprs.end(); ++i) {
+            result->addSubexpression(loader.loadCheck(*i));
+        }
+        return (result);
+    }
+    virtual bool allowListAbbreviation() const { return (false); }
+private:
+    const std::string name_;
+};
+
+}
+}
+
+#endif
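A registration sketch for these creators follows. It is illustrative only and not part of this patch; it reuses the hypothetical MyContext and the "ip" creator from the sketch after loader.h above.

// Illustrative sketch only - not part of this patch.
#include <acl/logic_check.h>
#include <boost/shared_ptr.hpp>

void registerLogicExample(isc::acl::Loader<MyContext>& loader) {
    using namespace isc::acl;
    loader.registerCreator(
        boost::shared_ptr<Loader<MyContext>::CheckCreator>(
            new LogicCreator<AnyOfSpec, MyContext>("ANY")));
    loader.registerCreator(
        boost::shared_ptr<Loader<MyContext>::CheckCreator>(
            new LogicCreator<AllOfSpec, MyContext>("ALL")));
    // After this, a check such as
    //   {"ANY": [{"ip": "192.0.2.1"}, {"ip": "2001:db8::1"}]}
    // loads into a LogicOperator<AnyOfSpec, MyContext> whose two
    // subexpressions are IPCheck<MyContext> objects.
}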
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index 5c0cdfc..ae137a8 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -1,15 +1,28 @@
 AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
 
 TESTS =
 if HAVE_GTEST
 TESTS += run_unittests
 run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += check_test.cc acl_test.cc
+run_unittests_SOURCES += acl_test.cc
+run_unittests_SOURCES += check_test.cc
+run_unittests_SOURCES += dns_test.cc
+run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += loader_test.cc
+run_unittests_SOURCES += logcheck.h
+run_unittests_SOURCES += logic_check_test.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 
 run_unittests_LDADD = $(GTEST_LDADD)
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
 endif
 
 noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/acl/tests/acl_test.cc b/src/lib/acl/tests/acl_test.cc
index 36baff6..5829fe7 100644
--- a/src/lib/acl/tests/acl_test.cc
+++ b/src/lib/acl/tests/acl_test.cc
@@ -12,75 +12,11 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include <gtest/gtest.h>
-#include <acl/acl.h>
-#include <cassert>
-
-using namespace isc::acl;
-using boost::shared_ptr;
+#include "logcheck.h"
 
 namespace {
 
-// This is arbitrary guess of size for the log. If it's too small for your
-// test, just make it bigger.
-const size_t LOG_SIZE = 10;
-
-// This will remember which checks did run already.
-struct Log {
-    // The actual log cells, if i-th check did run
-    mutable bool run[LOG_SIZE];
-    Log() {
-        // Nothing run yet
-        for (size_t i(0); i < LOG_SIZE; ++i) {
-            run[i] = false;
-        }
-    }
-    // Checks that the first amount of checks did run and the rest didn't.
-    void checkFirst(size_t amount) const {
-        ASSERT_LE(amount, LOG_SIZE) << "Wrong test: amount bigger than size "
-            "of log";
-        {
-            SCOPED_TRACE("Checking that the first amount of checks did run");
-            for (size_t i(0); i < amount; ++i) {
-                EXPECT_TRUE(run[i]) << "Check #" << i << " did not run.";
-            }
-        }
-
-        {
-            SCOPED_TRACE("Checking that the rest did not run");
-            for (size_t i(amount); i < LOG_SIZE; ++i) {
-                EXPECT_FALSE(run[i]) << "Check #" << i << "did run.";
-            }
-        }
-    }
-};
-
-// This returns true or false every time, no matter what is passed to it.
-// But it logs that it did run.
-class ConstCheck : public Check<Log> {
-public:
-    ConstCheck(bool accepts, size_t log_num) :
-        log_num_(log_num),
-        accepts_(accepts)
-    {
-        assert(log_num < LOG_SIZE); // If this fails, the LOG_SIZE is too small
-    }
-    /*
-     * This use of mutable log context is abuse for testing purposes.
-     * It is expected that the context will not be modified in the real
-     * applications of ACLs, but we want to know which checks were called
-     * and this is an easy way.
-     */
-    virtual bool matches(const Log& log) const {
-        log.run[log_num_] = true;
-        return (accepts_);
-    }
-private:
-    size_t log_num_;
-    bool accepts_;
-};
-
-// Test version of the ACL class. It adds few methods to examine the protected
+// Test version of the ACL class. It adds a few methods to examine the protected
 // data, but does not change the implementation.
 class TestACL : public ACL<Log> {
 public:
diff --git a/src/lib/acl/tests/creators.h b/src/lib/acl/tests/creators.h
new file mode 100644
index 0000000..85f3444
--- /dev/null
+++ b/src/lib/acl/tests/creators.h
@@ -0,0 +1,154 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is not a public header, but some code shared between tests.
+// It contains various creators used to test the loader and other creators.
+
+#ifndef CREATORS_H
+#define CREATORS_H
+
+#include "logcheck.h"
+#include <acl/loader.h>
+#include <string>
+
+using isc::data::ConstElementPtr;
+using namespace std;
+using namespace boost;
+
+namespace {
+
+// Just for convenience, create JSON objects from JSON string
+ConstElementPtr el(const string& JSON) {
+    return (isc::data::Element::fromJSON(JSON));
+}
+
+// A check that doesn't check anything but remembers its own name
+// and data
+class NamedCheck : public Check<Log> {
+public:
+    NamedCheck(const string& name, ConstElementPtr data) :
+        name_(name),
+        data_(data)
+    {}
+    virtual bool matches(const Log&) const { return (true); }
+    const string name_;
+    const ConstElementPtr data_;
+};
+
+// The creator of NamedCheck
+class NamedCreator : public Loader<Log>::CheckCreator {
+public:
+    NamedCreator(const string& name, bool abbreviatedList = true) :
+        abbreviated_list_(abbreviatedList)
+    {
+        names_.push_back(name);
+    }
+    NamedCreator(const vector<string>& names) :
+        names_(names),
+        abbreviated_list_(true)
+    {}
+    vector<string> names() const {
+        return (names_);
+    }
+    shared_ptr<Check<Log> > create(const string& name, ConstElementPtr data,
+                                   const Loader<Log>&)
+    {
+        bool found(false);
+        for (vector<string>::const_iterator i(names_.begin());
+             i != names_.end(); ++i) {
+            if (*i == name) {
+                found = true;
+                break;
+            }
+        }
+        EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
+            "doesn't handle it.";
+        return (shared_ptr<Check<Log> >(new NamedCheck(name, data)));
+    }
+    bool allowListAbbreviation() const {
+        return (abbreviated_list_);
+    }
+private:
+    vector<string> names_;
+    const bool abbreviated_list_;
+};
+
+// To be thrown in tests internally
+class TestCreatorError {};
+
+// This will throw every time it should create something
+class ThrowCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("throw");
+        return (result);
+    }
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
+                                   const Loader<Log>&)
+    {
+        throw TestCreatorError();
+    }
+};
+
+// This throws whenever the match is called on it
+class ThrowCheck : public Check<Log> {
+public:
+    virtual bool matches(const Log&) const {
+        throw TestCreatorError();
+    }
+};
+
+// And creator for it
+class ThrowCheckCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("throwcheck");
+        return (result);
+    }
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
+                                   const Loader<Log>&)
+    {
+        return (shared_ptr<Check<Log> >(new ThrowCheck()));
+    }
+};
+
+class LogCreator : public Loader<Log>::CheckCreator {
+public:
+    vector<string> names() const {
+        vector<string> result;
+        result.push_back("logcheck");
+        return (result);
+    }
+    /*
+     * For simplicity, we just take two values as a list: the first is the
+     * logging cell used, the second is the result of the check. No error
+     * checking is done; if there's a bug in the test, it will throw TypeError
+     * for us.
+     */
+    shared_ptr<Check<Log> > create(const string&, ConstElementPtr definition,
+                                   const Loader<Log>&)
+    {
+        vector<ConstElementPtr> list(definition->listValue());
+        int logpos(list[0]->intValue());
+        bool accept(list[1]->boolValue());
+        return (shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
+    }
+    // We take a list, so don't interpret it for us
+    virtual bool allowListAbbreviation() const { return (false); }
+};
+
+}
+
+#endif
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
new file mode 100644
index 0000000..e5e0f3a
--- /dev/null
+++ b/src/lib/acl/tests/dns_test.cc
@@ -0,0 +1,35 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <acl/dns.h>
+#include <gtest/gtest.h>
+
+using namespace isc::acl::dns;
+
+namespace {
+
+// Tests that getLoader actually returns something, returns the same thing
+// every time, and that the returned value can be used for something. It is
+// not much of a test, but getLoader is not much of a function.
+TEST(DNSACL, getLoader) {
+    Loader* l(&getLoader());
+    ASSERT_TRUE(l != NULL);
+    EXPECT_EQ(l, &getLoader());
+    EXPECT_NO_THROW(l->load(isc::data::Element::fromJSON(
+        "[{\"action\": \"DROP\"}]")));
+    // TODO Test that the things we should register by default, like IP based
+    // check, are loaded.
+}
+
+}
diff --git a/src/lib/acl/tests/ip_check_unittest.cc b/src/lib/acl/tests/ip_check_unittest.cc
new file mode 100644
index 0000000..3fcb05b
--- /dev/null
+++ b/src/lib/acl/tests/ip_check_unittest.cc
@@ -0,0 +1,588 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+#include <acl/ip_check.h>
+
+using namespace isc::acl;
+using namespace isc::acl::internal;
+using namespace std;
+
+namespace {
+const size_t IPV4_SIZE = 4;
+const size_t IPV6_SIZE = 16;
+
+// Simple struct holding either an IPV4 or IPV6 address.  This is the "Context"
+// used for the tests.
+//
+// The structure is also used for converting an IPV4 address to a four-byte
+// array.
+struct GeneralAddress {
+    int             family;     // Family of the address
+    vector<uint8_t> addr;       // Address bytes.  Size indicates what it holds
+
+    // Convert uint32_t address in host-byte order to a uint8_t vector in
+    // network-byte order.
+    vector<uint8_t> convertUint32(uint32_t address) {
+        BOOST_STATIC_ASSERT(sizeof(uint32_t) == IPV4_SIZE);
+
+        vector<uint8_t> result(IPV4_SIZE);
+
+        // Store the address in network-byte order in the array: the
+        // MS byte goes at the lowest index.
+        result[3] = address & 0xff;
+        result[2] = (address >> 8) & 0xff;
+        result[1] = (address >> 16) & 0xff;
+        result[0] = (address >> 24) & 0xff;
+
+        return (result);
+    }
+
+    // Convenience constructor for V4 address.  As it is not marked as explicit,
+    // it allows the automatic promotion of a uint32_t to a GeneralAddress data
+    // type in calls to matches().
+    GeneralAddress(uint32_t address) : family(AF_INET), addr()
+    {
+        addr = convertUint32(address);
+    }
+
+    // Convenience constructor for V6 address.  As it is not marked as explicit,
+    // it allows the automatic promotion of a vector<uint8_t> to a
+    // GeneralAddress data type in calls to matches().
+    GeneralAddress(const vector<uint8_t>& address) : family(AF_INET6),
+                                                     addr(address)
+    {
+        if (address.size() != IPV6_SIZE) {
+            isc_throw(isc::InvalidParameter, "vector passed to GeneralAddress "
+                      "constructor is " << address.size() << " bytes long - it "
+                      "should be " << IPV6_SIZE << " bytes instead");
+        }
+    }
+
+    // A couple of convenience methods for checking equality with different
+    // representations of an address.
+
+    // Check that the IPV4 address is the same as that given.
+    bool equals(uint32_t address) {
+        if (family == AF_INET) {
+            const vector<uint8_t> byte_address = convertUint32(address);
+            return (equal(byte_address.begin(), byte_address.end(),
+                           addr.begin()));
+        }
+        return (false);
+    }
+
+    // Check that the array is equal to that given.
+    bool equals(const vector<uint8_t>& byte_address) {
+        if (addr.size() == byte_address.size()) {
+            return (equal(byte_address.begin(), byte_address.end(),
+                           addr.begin()));
+        }
+        return (false);
+    }
+};
+} // Unnamed namespace
+
+// Provide a specialisation of the IPCheck::matches() method for the
+// GeneralAddress class.
+
+namespace isc  {
+namespace acl {
+template <>
+bool IPCheck<GeneralAddress>::matches(const GeneralAddress& address) const {
+    return (compare(&address.addr[0], address.family));
+}
+} // namespace acl
+} // namespace isc
+
+namespace {
+/// *** Free Function Tests ***
+
+// Test the createMask() function.
+TEST(IPFunctionCheck, CreateMask) {
+
+    // Invalid arguments should throw.
+    EXPECT_THROW(createMask(9), isc::OutOfRange);
+
+    // Check on all possible 8-bit values.
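+    // (For reference, assuming createMask() sets the topmost bits first:
+    // createMask(0) == 0x00, createMask(1) == 0x80, ..., createMask(8) == 0xff.)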
+    uint16_t expected = 0xff00;
+    for (size_t i = 0; i <= 8; ++i, expected >>= 1) {
+        EXPECT_EQ(static_cast<uint8_t>(expected & 0xff), createMask(i));
+    }
+}
+
+// Test the splitIPAddress() function.
+TEST(IPFunctionCheck, SplitIPAddress) {
+    pair<string, uint32_t> result;
+
+    result = splitIPAddress("192.0.2.1");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(-1, result.second);
+
+    result = splitIPAddress("192.0.2.1/24");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(24, result.second);
+
+    result = splitIPAddress("2001:db8::/128");
+    EXPECT_EQ(string("2001:db8::"), result.first);
+    EXPECT_EQ(128, result.second);
+
+    result = splitIPAddress("192.0.2.1/0");
+    EXPECT_EQ(string("192.0.2.1"), result.first);
+    EXPECT_EQ(0, result.second);
+
+    EXPECT_THROW(splitIPAddress("192.0.2.43/27 "), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43/-1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43//1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("192.0.2.43/1/"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("/192.0.2.43/1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("2001:db8::/xxxx"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("2001:db8::/32/s"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("1/"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress("/1"), isc::InvalidParameter);
+    EXPECT_THROW(splitIPAddress(" 1/ "), isc::InvalidParameter);
+}
+
+// *** IPv4 Tests ***
+
+TEST(IPCheck, V4StringConstructor) {
+
+    // Constructor with no prefix length given (32 is assumed).
+    IPCheck<GeneralAddress> acl1("192.0.2.255");
+    EXPECT_EQ(32, acl1.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl1.getFamily());
+
+    vector<uint8_t> stored1 = acl1.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored1.size());
+    GeneralAddress expected1(0xc00002ff);
+    EXPECT_TRUE(expected1.equals(stored1));
+
+    // Constructor with valid mask given
+    IPCheck<GeneralAddress> acl2("192.0.2.0/24");
+    EXPECT_EQ(24, acl2.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl2.getFamily());
+
+    vector<uint8_t> stored2 = acl2.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored2.size());
+    GeneralAddress expected2(0xc0000200);
+    EXPECT_TRUE(expected2.equals(stored2));
+
+    // More valid masks
+    IPCheck<GeneralAddress> acl3("192.0.2.1/0");
+    EXPECT_EQ(0, acl3.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl3.getFamily());
+
+    vector<uint8_t> stored3 = acl3.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored3.size());
+    GeneralAddress expected3(0xc0000201);
+    EXPECT_TRUE(expected3.equals(stored3));
+
+    IPCheck<GeneralAddress> acl4("192.0.2.2/32");
+    EXPECT_EQ(32, acl4.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl4.getFamily());
+
+    vector<uint8_t> stored4 = acl4.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored4.size());
+    GeneralAddress expected4(0xc0000202);
+    EXPECT_TRUE(expected4.equals(stored4));
+
+    // Any match
+    IPCheck<GeneralAddress> acl5("any4");
+    EXPECT_EQ(0, acl5.getPrefixlen());
+    EXPECT_EQ(AF_INET, acl5.getFamily());
+
+    vector<uint8_t> stored5 = acl5.getAddress();
+    EXPECT_EQ(IPV4_SIZE, stored5.size());
+    GeneralAddress expected5(0);
+    EXPECT_TRUE(expected5.equals(stored5));
+
+    // Invalid prefix lengths
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/33"), isc::OutOfRange);
+
+    // ... and invalid strings
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/-1"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/24/3"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/ww"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("aa.255.255.0/ww"),
+                 isc::InvalidParameter);
+}
+
+TEST(IPCheck, V4CopyConstructor) {
+    IPCheck<GeneralAddress> acl1("192.0.2.1/24");
+    IPCheck<GeneralAddress> acl2(acl1);
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+    EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+    vector<uint8_t> net1 = acl1.getMask();
+    vector<uint8_t> net2 = acl2.getMask();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+    net1 = acl1.getAddress();
+    net2 = acl2.getAddress();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+TEST(IPCheck, V4AssignmentOperator) {
+    IPCheck<GeneralAddress> acl1("192.0.2.0/24");
+    IPCheck<GeneralAddress> acl2("192.0.2.128/25");
+    acl2 = acl1;
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+    EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+    vector<uint8_t> net1 = acl1.getMask();
+    vector<uint8_t> net2 = acl2.getMask();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+    net1 = acl1.getAddress();
+    net2 = acl2.getAddress();
+    EXPECT_EQ(net1.size(), net2.size());
+    EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+// Check that the comparison works - note that "matches" just calls the
+// internal compare() code. (Also note that the argument to matches() will be
+// automatically converted to the GeneralAddress data type used for the tests
+// because of its constructor taking a uint32_t argument.)
+
+TEST(IPCheck, V4Compare) {
+    // Exact address - match if given address matches stored address.
+    IPCheck<GeneralAddress> acl1("192.0.2.255/32");
+    EXPECT_TRUE(acl1.matches(0xc00002ff));
+    EXPECT_FALSE(acl1.matches(0xc00002fe));
+    EXPECT_FALSE(acl1.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl2("192.0.2.255/27");
+    EXPECT_TRUE(acl2.matches(0xc00002ff));
+    EXPECT_TRUE(acl2.matches(0xc00002fe));
+    EXPECT_TRUE(acl2.matches(0xc00002ee));
+    EXPECT_FALSE(acl2.matches(0xc00002de));
+    EXPECT_FALSE(acl2.matches(0xd00002fe));
+    EXPECT_FALSE(acl2.matches(0x13457f13));
+
+    // Match if "any4" is specified
+    IPCheck<GeneralAddress> acl3("any4");
+    EXPECT_TRUE(acl3.matches(0xc00002ff));
+    EXPECT_TRUE(acl3.matches(0xc00002fe));
+    EXPECT_TRUE(acl3.matches(0xc00002ee));
+    EXPECT_TRUE(acl3.matches(0xc00002de));
+    EXPECT_TRUE(acl3.matches(0xd00002fe));
+    EXPECT_TRUE(acl3.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl4("0.0.0.0/0");
+    EXPECT_TRUE(acl4.matches(0xc00002ff));
+    EXPECT_TRUE(acl4.matches(0xc00002fe));
+    EXPECT_TRUE(acl4.matches(0xc00002ee));
+    EXPECT_TRUE(acl4.matches(0xc00002de));
+    EXPECT_TRUE(acl4.matches(0xd00002fe));
+    EXPECT_TRUE(acl4.matches(0x13457f13));
+
+    IPCheck<GeneralAddress> acl5("192.0.2.255/0");
+    EXPECT_TRUE(acl5.matches(0xc00002ff));
+    EXPECT_TRUE(acl5.matches(0xc00002fe));
+    EXPECT_TRUE(acl5.matches(0xc00002ee));
+    EXPECT_TRUE(acl5.matches(0xc00002de));
+    EXPECT_TRUE(acl5.matches(0xd00002fe));
+    EXPECT_TRUE(acl5.matches(0x13457f13));
+}
+
+// *** IPV6 Tests ***
+
+// Some constants used in the tests
+
+const char* V6ADDR_1_STRING = "2001:0db8:1122:3344:5566:7788:99aa:bbcc";
+const uint8_t V6ADDR_1[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x11, 0x22, 0x33, 0x44,
+    0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc
+};
+
+const char* V6ADDR_2_STRING = "2001:0db8::dead:beef";
+const uint8_t V6ADDR_2[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 48 bits
+const uint8_t V6ADDR_2_48[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0xff, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 49 bits
+const uint8_t V6ADDR_2_49[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x7f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 50 bits
+const uint8_t V6ADDR_2_50[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x3f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 51 bits
+const uint8_t V6ADDR_2_51[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x1f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 52 bits
+const uint8_t V6ADDR_2_52[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x0f, 0x66,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 127 bits
+const uint8_t V6ADDR_2_127[] = {
+    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xee
+};
+
+const uint8_t V6ADDR_3[] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+const uint8_t V6ADDR_4[] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+TEST(IPCheck, V6StringConstructor) {
+    IPCheck<GeneralAddress> acl1(V6ADDR_1_STRING);
+    vector<uint8_t> address = acl1.getAddress();
+
+    EXPECT_EQ(128, acl1.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl1.getFamily());
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_1));
+
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/51"));
+    address = acl2.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(51, acl2.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl2.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+    IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/127"));
+    address = acl3.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(127, acl3.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl3.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+    IPCheck<GeneralAddress> acl4("::1");
+    address = acl4.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(128, acl4.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl4.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_3));
+
+    // Any match.  In these cases, the address should be all zeroes.
+    IPCheck<GeneralAddress> acl5("any6");
+    address = acl5.getAddress();
+    EXPECT_EQ(IPV6_SIZE, address.size());
+    EXPECT_EQ(0, acl5.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl5.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+    IPCheck<GeneralAddress> acl6("::/0");
+    address = acl6.getAddress();
+    EXPECT_EQ(0, acl6.getPrefixlen());
+    EXPECT_EQ(AF_INET6, acl6.getFamily());
+    EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+    // Some invalid strings
+    EXPECT_THROW(IPCheck<GeneralAddress>("::1/129"), isc::OutOfRange);
+    EXPECT_THROW(IPCheck<GeneralAddress>("::1/24/3"), isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>(":::1/24"), isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("2001:0db8::abcd/ww"),
+                 isc::InvalidParameter);
+    EXPECT_THROW(IPCheck<GeneralAddress>("2xx1:0db8::abcd/32"),
+                 isc::InvalidParameter);
+}
+
+TEST(IPCheck, V6CopyConstructor) {
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+    IPCheck<GeneralAddress> acl2(acl1);
+
+    vector<uint8_t> acl1_address = acl1.getAddress();
+    vector<uint8_t> acl2_address = acl2.getAddress();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+    EXPECT_EQ(acl1_address.size(), acl2_address.size());
+    EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+                acl2_address.begin()));
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+    vector<uint8_t> acl1_mask = acl1.getMask();
+    vector<uint8_t> acl2_mask = acl2.getMask();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+    EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+    EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+                acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6AssignmentOperator) {
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_1_STRING) + string("/48"));
+
+    acl2 = acl1;
+
+    vector<uint8_t> acl1_address = acl1.getAddress();
+    vector<uint8_t> acl2_address = acl2.getAddress();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+    EXPECT_EQ(acl1_address.size(), acl2_address.size());
+    EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+                acl2_address.begin()));
+
+    EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+    vector<uint8_t> acl1_mask = acl1.getMask();
+    vector<uint8_t> acl2_mask = acl2.getMask();
+    EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+    EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+    EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+                acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6Compare) {
+    // Set up some data.
+    vector<uint8_t> v6addr_2(V6ADDR_2, V6ADDR_2 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_48(V6ADDR_2_48, V6ADDR_2_48 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_49(V6ADDR_2_49, V6ADDR_2_49 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_50(V6ADDR_2_50, V6ADDR_2_50 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_51(V6ADDR_2_51, V6ADDR_2_51 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_52(V6ADDR_2_52, V6ADDR_2_52 + IPV6_SIZE);
+    vector<uint8_t> v6addr_2_127(V6ADDR_2_127, V6ADDR_2_127 + IPV6_SIZE);
+    vector<uint8_t> v6addr_3(V6ADDR_3, V6ADDR_3 + IPV6_SIZE);
+
+    // Exact address - match if given address matches stored address.
+    IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/128"));
+    EXPECT_TRUE(acl1.matches(v6addr_2));
+    EXPECT_FALSE(acl1.matches(v6addr_2_127));
+    EXPECT_FALSE(acl1.matches(v6addr_2_52));
+    EXPECT_FALSE(acl1.matches(v6addr_2_51));
+    EXPECT_FALSE(acl1.matches(v6addr_2_50));
+    EXPECT_FALSE(acl1.matches(v6addr_2_49));
+    EXPECT_FALSE(acl1.matches(v6addr_2_48));
+    EXPECT_FALSE(acl1.matches(v6addr_3));
+
+    // Match to various prefixes.
+    IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/127"));
+    EXPECT_TRUE(acl2.matches(v6addr_2));
+    EXPECT_TRUE(acl2.matches(v6addr_2_127));
+    EXPECT_FALSE(acl2.matches(v6addr_2_52));
+    EXPECT_FALSE(acl2.matches(v6addr_2_51));
+    EXPECT_FALSE(acl2.matches(v6addr_2_50));
+    EXPECT_FALSE(acl2.matches(v6addr_2_49));
+    EXPECT_FALSE(acl2.matches(v6addr_2_48));
+    EXPECT_FALSE(acl2.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/52"));
+    EXPECT_TRUE(acl3.matches(v6addr_2));
+    EXPECT_TRUE(acl3.matches(v6addr_2_127));
+    EXPECT_TRUE(acl3.matches(v6addr_2_52));
+    EXPECT_FALSE(acl3.matches(v6addr_2_51));
+    EXPECT_FALSE(acl3.matches(v6addr_2_50));
+    EXPECT_FALSE(acl3.matches(v6addr_2_49));
+    EXPECT_FALSE(acl3.matches(v6addr_2_48));
+    EXPECT_FALSE(acl3.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl4(string(V6ADDR_2_STRING) + string("/51"));
+    EXPECT_TRUE(acl4.matches(v6addr_2));
+    EXPECT_TRUE(acl4.matches(v6addr_2_127));
+    EXPECT_TRUE(acl4.matches(v6addr_2_52));
+    EXPECT_TRUE(acl4.matches(v6addr_2_51));
+    EXPECT_FALSE(acl4.matches(v6addr_2_50));
+    EXPECT_FALSE(acl4.matches(v6addr_2_49));
+    EXPECT_FALSE(acl4.matches(v6addr_2_48));
+    EXPECT_FALSE(acl4.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl5(string(V6ADDR_2_STRING) + string("/50"));
+    EXPECT_TRUE(acl5.matches(v6addr_2));
+    EXPECT_TRUE(acl5.matches(v6addr_2_127));
+    EXPECT_TRUE(acl5.matches(v6addr_2_52));
+    EXPECT_TRUE(acl5.matches(v6addr_2_51));
+    EXPECT_TRUE(acl5.matches(v6addr_2_50));
+    EXPECT_FALSE(acl5.matches(v6addr_2_49));
+    EXPECT_FALSE(acl5.matches(v6addr_2_48));
+    EXPECT_FALSE(acl5.matches(v6addr_3));
+
+    IPCheck<GeneralAddress> acl6(string(V6ADDR_2_STRING) + string("/0"));
+    EXPECT_TRUE(acl6.matches(v6addr_2));
+    EXPECT_TRUE(acl6.matches(v6addr_2_127));
+    EXPECT_TRUE(acl6.matches(v6addr_2_52));
+    EXPECT_TRUE(acl6.matches(v6addr_2_51));
+    EXPECT_TRUE(acl6.matches(v6addr_2_50));
+    EXPECT_TRUE(acl6.matches(v6addr_2_49));
+    EXPECT_TRUE(acl6.matches(v6addr_2_48));
+    EXPECT_TRUE(acl6.matches(v6addr_3));
+
+    // Match on any address
+    IPCheck<GeneralAddress> acl7("any6");
+    EXPECT_TRUE(acl7.matches(v6addr_2));
+    EXPECT_TRUE(acl7.matches(v6addr_2_127));
+    EXPECT_TRUE(acl7.matches(v6addr_2_52));
+    EXPECT_TRUE(acl7.matches(v6addr_2_51));
+    EXPECT_TRUE(acl7.matches(v6addr_2_50));
+    EXPECT_TRUE(acl7.matches(v6addr_2_49));
+    EXPECT_TRUE(acl7.matches(v6addr_2_48));
+}
+
+// *** Mixed-mode tests - mainly to check that no exception is thrown ***
+
+TEST(IPCheck, MixedMode) {
+
+    // ACL has a V4 address specified, check against a V6 address.
+    IPCheck<GeneralAddress> acl1("192.0.2.255/24");
+    GeneralAddress test1(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+    EXPECT_NO_THROW(acl1.matches(test1));
+    EXPECT_FALSE(acl1.matches(test1));
+
+    // Now the reverse - the ACL is specified with a V6 address.
+    IPCheck<GeneralAddress> acl2(V6ADDR_2_STRING);
+    GeneralAddress test2(0x12345678);
+    EXPECT_FALSE(acl2.matches(test2));
+
+    // Ensure only a V4 address matches "any4".
+    IPCheck<GeneralAddress> acl3("any4");
+    EXPECT_FALSE(acl3.matches(test1));
+    EXPECT_TRUE(acl3.matches(test2));
+
+    // ... and check the reverse
+    IPCheck<GeneralAddress> acl4("any6");
+    EXPECT_TRUE(acl4.matches(test1));
+    EXPECT_FALSE(acl4.matches(test2));
+
+    // Check where the bit pattern of an IPv4 address matches that of an IPv6
+    // one.
+    IPCheck<GeneralAddress> acl5("2001:db8::/32");
+    GeneralAddress test5(0x20010db8);
+    EXPECT_FALSE(acl5.matches(test5));
+
+    // ... and where the reverse is true. (2001:db8 corresponds to 32.1.13.184).
+    IPCheck<GeneralAddress> acl6("32.1.13.184");
+    GeneralAddress test6(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+    EXPECT_FALSE(acl6.matches(test6));
+}
+} // Unnamed namespace
diff --git a/src/lib/acl/tests/loader_test.cc b/src/lib/acl/tests/loader_test.cc
new file mode 100644
index 0000000..7dc088d
--- /dev/null
+++ b/src/lib/acl/tests/loader_test.cc
@@ -0,0 +1,371 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/loader.h>
+#include <string>
+#include <gtest/gtest.h>
+
+using namespace std;
+using namespace boost;
+
+namespace {
+
+// We don't use the EXPECT_THROW macro, as it doesn't allow us
+// to examine the exception. We want to check that the offending element
+// is stored in it as well.
+void testActionLoaderException(const string& JSON) {
+    SCOPED_TRACE("Should throw with input: " + JSON);
+    ConstElementPtr elem(el(JSON));
+    try {
+        defaultActionLoader(elem);
+        FAIL() << "It did not throw";
+    }
+    catch (const LoaderError& error) {
+        // Yes, comparing for pointer equality, that is enough, it
+        // should return the exact instance of the JSON object
+        EXPECT_EQ(elem, error.element());
+    }
+}
+
+// Test the defaultActionLoader function
+TEST(LoaderHelpers, DefaultActionLoader) {
+    // First the three valid inputs
+    EXPECT_EQ(ACCEPT, defaultActionLoader(el("\"ACCEPT\"")));
+    EXPECT_EQ(REJECT, defaultActionLoader(el("\"REJECT\"")));
+    EXPECT_EQ(DROP, defaultActionLoader(el("\"DROP\"")));
+    // Now a few invalid ones
+    // String, but unknown one
+    testActionLoaderException("\"UNKNOWN\"");
+    testActionLoaderException("42");
+    testActionLoaderException("true");
+    testActionLoaderException("null");
+    testActionLoaderException("[]");
+    testActionLoaderException("{}");
+}
+
+class LoaderTest : public ::testing::Test {
+public:
+    LoaderTest() :
+        loader_(REJECT)
+    {}
+    Loader<Log> loader_;
+    Log log_;
+    // Some convenience functions to set up
+
+    // Create a NamedCreator, convert to shared pointer
+    shared_ptr<NamedCreator> namedCreator(const string& name,
+                                          bool abbreviatedList = true)
+    {
+        return (shared_ptr<NamedCreator>(new NamedCreator(name,
+                                                          abbreviatedList)));
+    }
+    // Create and add a NamedCreator
+    void addNamed(const string& name, bool abbreviatedList = true) {
+        EXPECT_NO_THROW(loader_.registerCreator(
+            namedCreator(name, abbreviatedList)));
+    }
+    template<class Result> shared_ptr<Result> loadCheckAny(const string&
+                                                               definition)
+    {
+        SCOPED_TRACE("Loading check " + definition);
+        shared_ptr<Check<Log> > loaded;
+        EXPECT_NO_THROW(loaded = loader_.loadCheck(el(definition)));
+        shared_ptr<Result> result(dynamic_pointer_cast<Result>(
+            loaded));
+        EXPECT_TRUE(result);
+        return (result);
+    }
+    // Load a check and convert it to named check to examine it
+    shared_ptr<NamedCheck> loadCheck(const string& definition) {
+        return (loadCheckAny<NamedCheck>(definition));
+    }
+    // The loadCheck throws an exception
+    void checkException(const string& JSON) {
+        SCOPED_TRACE("Loading check exception: " + JSON);
+        ConstElementPtr input(el(JSON));
+        // Not using EXPECT_THROW, we want to examine the exception
+        try {
+            loader_.loadCheck(input);
+            FAIL() << "Should have thrown";
+        }
+        catch (const LoaderError& e) {
+            // It should be the exact instance, so comparing pointers is enough
+            EXPECT_EQ(input, e.element());
+        }
+    }
+    // Insert the throw, throwcheck and logcheck checks into the loader
+    void aclSetup() {
+        try {
+            loader_.registerCreator(shared_ptr<ThrowCreator>(new
+                                                             ThrowCreator()));
+            loader_.registerCreator(shared_ptr<ThrowCheckCreator>(
+                new ThrowCheckCreator()));
+            loader_.registerCreator(shared_ptr<LogCreator>(new LogCreator()));
+        }
+        // We ignore this exception here, because it happens when we try to
+        // insert the creators multiple times. This is harmless.
+        catch (const LoaderError&) {}
+    }
+    // Create an ACL, run it, check its result and how many of the first
+    // log items it marked
+    //
+    // Works with preset names throw and logcheck
+    void aclRun(const string& JSON, BasicAction expectedResult,
+                size_t logged)
+    {
+        SCOPED_TRACE("Running ACL for " + JSON);
+        aclSetup();
+        shared_ptr<ACL<Log> > acl;
+        EXPECT_NO_THROW(acl = loader_.load(el(JSON)));
+        EXPECT_EQ(expectedResult, acl->execute(log_));
+        log_.checkFirst(logged);
+    }
+    // Check it throws an error when creating the ACL
+    void aclException(const string& JSON) {
+        SCOPED_TRACE("Trying to load bad " + JSON);
+        aclSetup();
+        EXPECT_THROW(loader_.load(el(JSON)), LoaderError);
+    }
+    // Check that the subexpression is NamedCheck with correct data
+    void isSubexprNamed(const CompoundCheck<Log>* compound, size_t index,
+                        const string& name, ConstElementPtr data)
+    {
+        if (index < compound->getSubexpressions().size()) {
+            const NamedCheck*
+                check(dynamic_cast<const NamedCheck*>(compound->
+                                                      getSubexpressions()
+                                                      [index]));
+            ASSERT_TRUE(check) << "The subexpression is of different type";
+            EXPECT_EQ(name, check->name_);
+            EXPECT_TRUE(data->equals(*check->data_));
+        }
+    }
+};
+
+// Test that it does not accept a duplicate creator
+TEST_F(LoaderTest, CreatorDuplicity) {
+    addNamed("name");
+    EXPECT_THROW(loader_.registerCreator(namedCreator("name")), LoaderError);
+}
+
+// Test that when a duplicate is rejected, none of its names are inserted
+TEST_F(LoaderTest, CreatorDuplicateUnchanged) {
+    addNamed("name1");
+    vector<string> names;
+    names.push_back("name2");
+    names.push_back("name1");
+    names.push_back("name3");
+    EXPECT_THROW(loader_.registerCreator(
+        shared_ptr<NamedCreator>(new NamedCreator(names))), LoaderError);
+    // It should now reject both name2 and name3 as not known
+    checkException("{\"name2\": null}");
+    checkException("{\"name3\": null}");
+}
+
+// Test that we can register a creator and load a check with the name
+TEST_F(LoaderTest, SimpleCheckLoad) {
+    addNamed("name");
+    shared_ptr<NamedCheck> check(loadCheck("{\"name\": 42}"));
+    EXPECT_EQ("name", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("42")));
+}
+
+// As above, but there are multiple creators registered within the loader
+TEST_F(LoaderTest, MultiCreatorCheckLoad) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<NamedCheck> check(loadCheck("{\"name2\": 42}"));
+    EXPECT_EQ("name2", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("42")));
+}
+
+// Similar to above, but there's a creator with multiple names
+TEST_F(LoaderTest, MultiNameCheckLoad) {
+    addNamed("name1");
+    vector<string> names;
+    names.push_back("name2");
+    names.push_back("name3");
+    EXPECT_NO_THROW(loader_.registerCreator(shared_ptr<NamedCreator>(
+        new NamedCreator(names))));
+    shared_ptr<NamedCheck> check(loadCheck("{\"name3\": 42}"));
+    EXPECT_EQ("name3", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("42")));
+}
+
+// Invalid format is rejected
+TEST_F(LoaderTest, InvalidFormatCheck) {
+    checkException("[]");
+    checkException("42");
+    checkException("\"hello\"");
+    checkException("null");
+}
+
+// Empty check is rejected
+TEST_F(LoaderTest, EmptyCheck) {
+    checkException("{}");
+}
+
+// The name isn't known
+TEST_F(LoaderTest, UnkownName) {
+    checkException("{\"unknown\": null}");
+}
+
+// Exception from the creator is propagated
+TEST_F(LoaderTest, CheckPropagate) {
+    loader_.registerCreator(shared_ptr<ThrowCreator>(new ThrowCreator()));
+    EXPECT_THROW(loader_.loadCheck(el("{\"throw\": null}")), TestCreatorError);
+}
+
+// The abbreviated form of check
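+// (A map with several entries is implicitly combined as an ALL-of expression;
+// the list form tested in OrAbbrev below becomes an ANY-of expression.)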
+TEST_F(LoaderTest, AndAbbrev) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": 2}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        EXPECT_EQ(2, oper->getSubexpressions().size());
+        // Note: this test relies on the ordering in which map returns its
+        // elements, which is the lexicographical order of the strings.
+        // This is not required by our interface, but it makes the test
+        // easier to write.
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        isSubexprNamed(&*oper, 1, "name2", el("2"));
+    }
+}
+
+// The abbreviated form of parameters
+TEST_F(LoaderTest, OrAbbrev) {
+    addNamed("name1");
+    shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AnyOfSpec, Log> >("{\"name1\": [1, 2]}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        EXPECT_EQ(2, oper->getSubexpressions().size());
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        isSubexprNamed(&*oper, 1, "name1", el("2"));
+    }
+}
+
+// Combined abbreviated form - both kinds of abbreviation at once
+TEST_F(LoaderTest, BothAbbrev) {
+    addNamed("name1");
+    addNamed("name2");
+    shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+        loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": [3, 4]}"));
+    // If we don't have anything loaded, the rest would crash. It is already
+    // reported from within loadCheckAny if it isn't loaded.
+    if (oper) {
+        // The subexpressions are correct
+        ASSERT_EQ(2, oper->getSubexpressions().size());
+        // Note: this test relies on the ordering in which map returns its
+        // elements, which is the lexicographical order of the strings.
+        // This is not required by our interface, but it makes the test
+        // easier to write.
+        isSubexprNamed(&*oper, 0, "name1", el("1"));
+        const LogicOperator<AnyOfSpec, Log>*
+            orOper(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>(
+            oper->getSubexpressions()[1]));
+        ASSERT_TRUE(orOper) << "Different type than AnyOf operator";
+        EXPECT_EQ(2, orOper->getSubexpressions().size());
+        isSubexprNamed(orOper, 0, "name2", el("3"));
+        isSubexprNamed(orOper, 1, "name2", el("4"));
+    }
+}
+
+// But this is not the abbreviated form; it should be passed directly to the
+// creator
+TEST_F(LoaderTest, ListCheck) {
+    addNamed("name1", false);
+    shared_ptr<NamedCheck> check(loadCheck("{\"name1\": [1, 2]}"));
+    EXPECT_EQ("name1", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("[1, 2]")));
+}
+
+// Check the action key is ignored as it should be
+TEST_F(LoaderTest, CheckNoAction) {
+    addNamed("name1");
+    shared_ptr<NamedCheck> check(loadCheck("{\"name1\": 1, \"action\": 2}"));
+    EXPECT_EQ("name1", check->name_);
+    EXPECT_TRUE(check->data_->equals(*el("1")));
+}
+
+// The empty ACL can be created and run, providing the default action
+TEST_F(LoaderTest, EmptyACL) {
+    aclRun("[]", REJECT, 0);
+}
+
+// We can create a simple ACL, which will return the correct default
+// action
+TEST_F(LoaderTest, NoMatchACL) {
+    aclRun("[{\"logcheck\": [0, false], \"action\": \"ACCEPT\"}]",
+           REJECT, 1);
+}
+
+// We can create a more complicated ACL; it will match at the second
+// check
+TEST_F(LoaderTest, MatchACL) {
+    aclRun("["
+           "  {\"logcheck\": [0, false], \"action\": \"DROP\"},"
+           "  {\"logcheck\": [1, true], \"action\": \"ACCEPT\"}"
+           "]", ACCEPT, 2);
+}
+
+// ACL without a check (matches unconditionally).
+// We add one more entry after it, to make sure it is really not run
+TEST_F(LoaderTest, NoCheckACL) {
+    aclRun("["
+           "  {\"action\": \"DROP\"},"
+           "  {\"throwcheck\": 1, \"action\": \"ACCEPT\"}"
+           "]", DROP, 0);
+}
+
+// Malformed things are rejected
+TEST_F(LoaderTest, InvalidACLFormat) {
+    // Not a list
+    aclException("{}");
+    aclException("42");
+    aclException("true");
+    aclException("null");
+    aclException("\"hello\"");
+    // Malformed element
+    aclException("[42]");
+    aclException("[\"hello\"]");
+    aclException("[[]]");
+    aclException("[true]");
+    aclException("[null]");
+}
+
+// If there's no action keyword, it is rejected
+TEST_F(LoaderTest, NoAction) {
+    aclException("[{}]");
+    aclException("[{\"logcheck\": [0, true]}]");
+}
+
+// Exceptions from check creation are propagated
+TEST_F(LoaderTest, ACLPropagate) {
+    aclSetup();
+    EXPECT_THROW(loader_.load(el("[{\"action\": \"ACCEPT\", \"throw\": 1}]")),
+                 TestCreatorError);
+}
+
+}
diff --git a/src/lib/acl/tests/logcheck.h b/src/lib/acl/tests/logcheck.h
new file mode 100644
index 0000000..776ff53
--- /dev/null
+++ b/src/lib/acl/tests/logcheck.h
@@ -0,0 +1,91 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LOGCHECK_H
+#define LOGCHECK_H
+
+#include <gtest/gtest.h>
+#include <acl/acl.h>
+#include <cassert>
+
+// This is not a public header; it is used only inside the tests. Therefore
+// we lower the standards a bit and use an anonymous namespace in the header
+// and "using", just for convenience. This is just to share a little bit of
+// code between multiple tests.
+using namespace isc::acl;
+using boost::shared_ptr;
+
+namespace {
+
+// This is an arbitrary guess at the size of the log. If it's too small for
+// your test, just make it bigger.
+const size_t LOG_SIZE = 10;
+
+// This will remember which checks did run already.
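+// A test typically creates a Log, runs checks or an ACL against it, and then
+// calls checkFirst(N) to verify that exactly the first N checks executed
+// (see loader_test.cc and logic_check_test.cc for examples).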
+struct Log {
+    // The actual log cells: whether the i-th check ran
+    mutable bool run[LOG_SIZE];
+    Log() {
+        // Nothing run yet
+        for (size_t i(0); i < LOG_SIZE; ++ i) {
+            run[i] = false;
+        }
+    }
+    // Checks that the first 'amount' checks ran and the rest didn't.
+    void checkFirst(size_t amount) const {
+        ASSERT_LE(amount, LOG_SIZE) << "Wrong test: amount bigger than size "
+            "of log";
+        {
+            SCOPED_TRACE("Checking that the first amount of checks did run");
+            for (size_t i(0); i < amount; ++ i) {
+                EXPECT_TRUE(run[i]) << "Check #" << i << " did not run.";
+            }
+        }
+
+        {
+            SCOPED_TRACE("Checking that the rest did not run");
+            for (size_t i(amount); i < LOG_SIZE; ++ i) {
+                EXPECT_FALSE(run[i]) << "Check #" << i << " did run.";
+            }
+        }
+    }
+};
+
+// This returns true or false every time, no matter what is passed to it.
+// But it logs that it did run.
+class ConstCheck : public Check<Log> {
+public:
+    ConstCheck(bool accepts, size_t logNum) :
+        logNum_(logNum),
+        accepts_(accepts)
+    {
+        assert(logNum < LOG_SIZE); // If this fails, the LOG_SIZE is too small
+    }
+    virtual bool matches(const Log& log) const {
+        /*
+         * This is abuse of the context. It is designed to carry the
+         * information to check, not to modify it. However, this is the
+         * easiest way to do the test, so we go against the design.
+         */
+        log.run[logNum_] = true;
+        return (accepts_);
+    }
+private:
+    size_t logNum_;
+    bool accepts_;
+};
+
+}
+
+#endif
diff --git a/src/lib/acl/tests/logic_check_test.cc b/src/lib/acl/tests/logic_check_test.cc
new file mode 100644
index 0000000..c4b00eb
--- /dev/null
+++ b/src/lib/acl/tests/logic_check_test.cc
@@ -0,0 +1,208 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/logic_check.h>
+
+using namespace isc::acl;
+
+namespace {
+
+// Test the defs in AnyOfSpec
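+// (Roughly: start() is presumably the result for an empty expression, and
+// terminate() says whether a subexpression's result ends the evaluation
+// early - the same reading applies to AllOfSpec below.)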
+TEST(LogicOperators, AnyOfSpec) {
+    EXPECT_FALSE(AnyOfSpec::start());
+    EXPECT_FALSE(AnyOfSpec::terminate(false));
+    EXPECT_TRUE(AnyOfSpec::terminate(true));
+}
+
+// Test the defs in AllOfSpec
+TEST(LogicOperators, AllOfSpec) {
+    EXPECT_TRUE(AllOfSpec::start());
+    EXPECT_TRUE(AllOfSpec::terminate(false));
+    EXPECT_FALSE(AllOfSpec::terminate(true));
+}
+
+// Generic test of one check
+template<typename Mode>
+void
+testCheck(bool emptyResult) {
+    // It can be created
+    LogicOperator<Mode, Log> oper;
+    // It is empty by default
+    EXPECT_EQ(0, oper.getSubexpressions().size());
+    // And returns the expected result for an empty expression (true for
+    // ALL-of, false for ANY-of)
+    Log log;
+    EXPECT_EQ(emptyResult, oper.matches(log));
+    log.checkFirst(0);
+    // Fill it with some subexpressions
+    typedef shared_ptr<ConstCheck> CheckPtr;
+    oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 0)));
+    oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 1)));
+    // Check what happens when only the default-valued are there
+    EXPECT_EQ(2, oper.getSubexpressions().size());
+    EXPECT_EQ(emptyResult, oper.matches(log));
+    log.checkFirst(2);
+    oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 2)));
+    oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 3)));
+    // They are listed there
+    EXPECT_EQ(4, oper.getSubexpressions().size());
+    // Now the third one terminates the evaluation: the first three run, the
+    // fourth won't
+    EXPECT_EQ(!emptyResult, oper.matches(log));
+    log.checkFirst(3);
+}
+
+TEST(LogicOperators, AllOf) {
+    testCheck<AllOfSpec>(true);
+}
+
+TEST(LogicOperators, AnyOf) {
+    testCheck<AnyOfSpec>(false);
+}
+
+// Fixture for the tests of the creators
+class LogicCreatorTest : public ::testing::Test {
+private:
+    typedef shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
+public:
+    // Register some creators, both tested ones and some auxiliary ones for
+    // help
+    LogicCreatorTest():
+        loader_(REJECT)
+    {
+        loader_.registerCreator(CreatorPtr(new
+            LogicCreator<AnyOfSpec, Log>("ANY")));
+        loader_.registerCreator(CreatorPtr(new
+            LogicCreator<AllOfSpec, Log>("ALL")));
+        loader_.registerCreator(CreatorPtr(new ThrowCreator));
+        loader_.registerCreator(CreatorPtr(new LogCreator));
+    }
+    // To mark which parts of the check did run
+    Log log_;
+    // The loader
+    Loader<Log> loader_;
+    // Some convenience shortcut names
+    typedef LogicOperator<AnyOfSpec, Log> AnyOf;
+    typedef LogicOperator<AllOfSpec, Log> AllOf;
+    typedef shared_ptr<AnyOf> AnyOfPtr;
+    typedef shared_ptr<AllOf> AllOfPtr;
+    // Loads the JSON as a check and tries to convert it to the given check
+    // subclass
+    template<typename Result> shared_ptr<Result> load(const string& JSON) {
+        shared_ptr<Check<Log> > result;
+        EXPECT_NO_THROW(result = loader_.loadCheck(el(JSON)));
+        shared_ptr<Result>
+            resultConverted(dynamic_pointer_cast<Result>(result));
+        EXPECT_NE(shared_ptr<Result>(), resultConverted);
+        return (resultConverted);
+    }
+};
+
+// Test it can load empty ones
+TEST_F(LogicCreatorTest, empty) {
+    AnyOfPtr emptyAny(load<AnyOf>("{\"ANY\": []}"));
+    EXPECT_EQ(0, emptyAny->getSubexpressions().size());
+    AllOfPtr emptyAll(load<AllOf>("{\"ALL\": []}"));
+    EXPECT_EQ(0, emptyAll->getSubexpressions().size());
+}
+
+// Test it rejects invalid inputs (not a list as a parameter)
+TEST_F(LogicCreatorTest, invalid) {
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": null}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": {}}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": true}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": 42}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": \"hello\"}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": null}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": {}}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": true}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": 42}")), LoaderError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": \"hello\"}")), LoaderError);
+}
+
+// Exceptions from subexpression creation aren't caught
+TEST_F(LogicCreatorTest, propagate) {
+    EXPECT_THROW(loader_.loadCheck(el("{\"ANY\": [{\"throw\": null}]}")),
+                 TestCreatorError);
+    EXPECT_THROW(loader_.loadCheck(el("{\"ALL\": [{\"throw\": null}]}")),
+                 TestCreatorError);
+}
+
+// We can create a more complex ANY check and run it correctly
+TEST_F(LogicCreatorTest, anyRun) {
+    AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+                             "    {\"logcheck\": [0, false]},"
+                             "    {\"logcheck\": [1, true]},"
+                             "    {\"logcheck\": [2, true]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_TRUE(any->matches(log_));
+    log_.checkFirst(2);
+}
+
+// We can create a more complex ALL check and run it correctly
+TEST_F(LogicCreatorTest, allRun) {
+    AllOfPtr any(load<AllOf>("{\"ALL\": ["
+                             "    {\"logcheck\": [0, true]},"
+                             "    {\"logcheck\": [1, false]},"
+                             "    {\"logcheck\": [2, false]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_FALSE(any->matches(log_));
+    log_.checkFirst(2);
+}
+
+// Or is able to return false
+TEST_F(LogicCreatorTest, anyFalse) {
+    AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+                             "    {\"logcheck\": [0, false]},"
+                             "    {\"logcheck\": [1, false]},"
+                             "    {\"logcheck\": [2, false]}"
+                             "]}"));
+    EXPECT_EQ(3, any->getSubexpressions().size());
+    EXPECT_FALSE(any->matches(log_));
+    log_.checkFirst(3);
+}
+
+// And is able to return true
+TEST_F(LogicCreatorTest, andTrue) {
+    AllOfPtr all(load<AllOf>("{\"ALL\": ["
+                             "    {\"logcheck\": [0, true]},"
+                             "    {\"logcheck\": [1, true]},"
+                             "    {\"logcheck\": [2, true]}"
+                             "]}"));
+    EXPECT_EQ(3, all->getSubexpressions().size());
+    EXPECT_TRUE(all->matches(log_));
+    log_.checkFirst(3);
+}
+
+// We can nest them together
+TEST_F(LogicCreatorTest, nested) {
+    AllOfPtr all(load<AllOf>("{\"ALL\": ["
+                             "    {\"ANY\": ["
+                             "        {\"logcheck\": [0, true]},"
+                             "        {\"logcheck\": [2, true]},"
+                             "    ]},"
+                             "    {\"logcheck\": [1, false]}"
+                             "]}"));
+    EXPECT_EQ(2, all->getSubexpressions().size());
+    const LogicOperator<AnyOfSpec, Log>*
+        any(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>
+            (all->getSubexpressions()[0]));
+    EXPECT_EQ(2, any->getSubexpressions().size());
+    EXPECT_FALSE(all->matches(log_));
+    log_.checkFirst(2);
+}
+
+}
diff --git a/src/lib/acl/tests/run_unittests.cc b/src/lib/acl/tests/run_unittests.cc
index 61df6cf..8dc59a2 100644
--- a/src/lib/acl/tests/run_unittests.cc
+++ b/src/lib/acl/tests/run_unittests.cc
@@ -13,11 +13,12 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <gtest/gtest.h>
+#include <log/logger_support.h>
 #include <util/unittests/run_all.h>
 
 int
 main(int argc, char* argv[]) {
     ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
     return (isc::util::unittests::run_all());
 }
-
diff --git a/src/lib/asiodns/Makefile.am b/src/lib/asiodns/Makefile.am
index 2a6c3ac..2d246ef 100644
--- a/src/lib/asiodns/Makefile.am
+++ b/src/lib/asiodns/Makefile.am
@@ -8,13 +8,13 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-CLEANFILES = *.gcno *.gcda asiodef.h asiodef.cc
+CLEANFILES = *.gcno *.gcda asiodns_messages.h asiodns_messages.cc
 
 # Define rule to build logging source files from message file
-asiodef.h asiodef.cc: asiodef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodef.mes
+asiodns_messages.h asiodns_messages.cc: asiodns_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodns_messages.mes
 
-BUILT_SOURCES = asiodef.h asiodef.cc
+BUILT_SOURCES = asiodns_messages.h asiodns_messages.cc
 
 lib_LTLIBRARIES = libasiodns.la
 libasiodns_la_SOURCES = dns_answer.h
@@ -26,9 +26,9 @@ libasiodns_la_SOURCES += tcp_server.cc tcp_server.h
 libasiodns_la_SOURCES += udp_server.cc udp_server.h
 libasiodns_la_SOURCES += io_fetch.cc io_fetch.h
 
-nodist_libasiodns_la_SOURCES = asiodef.cc asiodef.h
+nodist_libasiodns_la_SOURCES = asiodns_messages.cc asiodns_messages.h
 
-EXTRA_DIST = asiodef.mes
+EXTRA_DIST = asiodns_messages.mes
 
 # Note: the ordering matters: -Wno-... must follow -Wextra (defined in
 # B10_CXXFLAGS)
diff --git a/src/lib/asiodns/asiodef.mes b/src/lib/asiodns/asiodef.mes
deleted file mode 100644
index 3f2e80c..0000000
--- a/src/lib/asiodns/asiodef.mes
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX ASIODNS_
-$NAMESPACE isc::asiodns
-
-% FETCHCOMP   upstream fetch to %1(%2) has now completed
-A debug message, this records the the upstream fetch (a query made by the
-resolver on behalf of its client) to the specified address has completed.
-
-% FETCHSTOP   upstream fetch to %1(%2) has been stopped
-An external component has requested the halting of an upstream fetch.  This
-is an allowed operation, and the message should only appear if debug is
-enabled.
-
-% OPENSOCK    error %1 opening %2 socket to %3(%4)
-The asynchronous I/O code encountered an error when trying to open a socket
-of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
-message.
-
-% RECVSOCK    error %1 reading %2 data from %3(%4)
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-
-% SENDSOCK    error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-
-% RECVTMO     receive timeout while waiting for data from %1(%2)
-An upstream fetch from the specified address timed out.  This may happen for
-any number of reasons and is most probably a problem at the remote server
-or a problem on the network.  The message will only appear if debug is
-enabled.
-
-% UNKORIGIN  unknown origin for ASIO error code %1 (protocol: %2, address %3)
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-
-% UNKRESULT  unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message).  This message should
-not appear and may indicate an internal error.  Please enter a bug report.
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
new file mode 100644
index 0000000..3e11ede
--- /dev/null
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -0,0 +1,56 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::asiodns
+
+% ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+
+% ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped
+An external component has requested the halting of an upstream fetch.  This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+
+% ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+
+% ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
+An upstream fetch from the specified address timed out.  This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network.  The message will only appear if debug is
+enabled.
+
+% ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+
+% ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message).  Please submit a bug report.
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 4b2edf9..31b5f50 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -41,7 +41,7 @@
 #include <log/logger.h>
 #include <log/macros.h>
 
-#include <asiodns/asiodef.h>
+#include <asiodns/asiodns_messages.h>
 #include <asiodns/io_fetch.h>
 
 #include <util/buffer.h>
@@ -158,7 +158,7 @@ struct IOFetchData {
         stopped(false),
         timeout(wait),
         packet(false),
-        origin(ASIODNS_UNKORIGIN),
+        origin(ASIODNS_UNKNOWN_ORIGIN),
         staging(),
         qid(QidGenerator::getInstance().generateQid())
     {}
@@ -280,7 +280,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
 
         // Open a connection to the target system.  For speed, if the operation
         // is synchronous (i.e. UDP operation) we bypass the yield.
-        data_->origin = ASIODNS_OPENSOCK;
+        data_->origin = ASIODNS_OPEN_SOCKET;
         if (data_->socket->isOpenSynchronous()) {
             data_->socket->open(data_->remote_snd.get(), *this);
         } else {
@@ -290,7 +290,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
         do {
             // Begin an asynchronous send, and then yield.  When the send completes,
             // we will resume immediately after this point.
-            data_->origin = ASIODNS_SENDSOCK;
+            data_->origin = ASIODNS_SEND_DATA;
             CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
                 data_->msgbuf->getLength(), data_->remote_snd.get(), *this);
     
@@ -313,7 +313,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
             // received all the data before copying it back to the user's buffer.
             // And we want to minimise the amount of copying...
     
-            data_->origin = ASIODNS_RECVSOCK;
+            data_->origin = ASIODNS_READ_DATA;
             data_->cumulative = 0;          // No data yet received
             data_->offset = 0;              // First data into start of buffer
             data_->received->clear();       // Clear the receive buffer
@@ -329,7 +329,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
 
         // Finished with this socket, so close it.  This will not generate an
         // I/O error, but reset the origin to unknown in case we change this.
-        data_->origin = ASIODNS_UNKORIGIN;
+        data_->origin = ASIODNS_UNKNOWN_ORIGIN;
         data_->socket->close();
 
         /// We are done
@@ -367,13 +367,13 @@ IOFetch::stop(Result result) {
         data_->stopped = true;
         switch (result) {
             case TIME_OUT:
-                LOG_DEBUG(logger, DBG_COMMON, ASIODNS_RECVTMO).
+                LOG_DEBUG(logger, DBG_COMMON, ASIODNS_READ_TIMEOUT).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
                 break;
 
             case SUCCESS:
-                LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCHCOMP).
+                LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCH_COMPLETED).
                     arg(data_->remote_rcv->getAddress().toText()).
                     arg(data_->remote_rcv->getPort());
                 break;
@@ -382,13 +382,13 @@ IOFetch::stop(Result result) {
                 // Fetch has been stopped for some other reason.  This is
                 // allowed but as it is unusual it is logged, but with a lower
                 // debug level than a timeout (which is totally normal).
-                LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCHSTOP).
+                LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCH_STOPPED).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
                 break;
 
             default:
-                LOG_ERROR(logger, ASIODNS_UNKRESULT).
+                LOG_ERROR(logger, ASIODNS_UNKNOWN_RESULT).
                     arg(data_->remote_snd->getAddress().toText()).
                     arg(data_->remote_snd->getPort());
         }
@@ -412,10 +412,10 @@ IOFetch::stop(Result result) {
 void IOFetch::logIOFailure(asio::error_code ec) {
 
     // Should only get here with a known error code.
-    assert((data_->origin == ASIODNS_OPENSOCK) ||
-           (data_->origin == ASIODNS_SENDSOCK) ||
-           (data_->origin == ASIODNS_RECVSOCK) ||
-           (data_->origin == ASIODNS_UNKORIGIN));
+    assert((data_->origin == ASIODNS_OPEN_SOCKET) ||
+           (data_->origin == ASIODNS_SEND_DATA) ||
+           (data_->origin == ASIODNS_READ_DATA) ||
+           (data_->origin == ASIODNS_UNKNOWN_ORIGIN));
 
     static const char* PROTOCOL[2] = {"TCP", "UDP"};
     LOG_ERROR(logger, data_->origin).arg(ec.value()).
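
The hunks above rely on the message identifiers being ordinary constants
generated from the module's message file, so one can be stored in data_->origin
when an operation starts and handed to the logging macros later if the
operation fails.  A minimal sketch of that pattern, assuming the generated
asiodns_messages.h exposes the identifiers as isc::log::MessageID constants;
the helper function and its name are illustrative only:

    #include <log/logger.h>
    #include <log/macros.h>
    #include <asiodns/asiodns_messages.h>

    // Illustrative helper: log an I/O failure using the message identifier
    // recorded when the operation started (e.g. ASIODNS_OPEN_SOCKET,
    // ASIODNS_SEND_DATA or ASIODNS_READ_DATA).  The identifier selects the
    // message text; arg() fills its %1 placeholder with the error code value.
    void logFetchFailure(isc::log::Logger& logger, int error_value,
                         isc::log::MessageID origin) {
        LOG_ERROR(logger, origin).arg(error_value);
    }
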
diff --git a/src/lib/config/Makefile.am b/src/lib/config/Makefile.am
index 52337ad..500ff12 100644
--- a/src/lib/config/Makefile.am
+++ b/src/lib/config/Makefile.am
@@ -6,10 +6,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 
 # Define rule to build logging source files from message file
-configdef.h configdef.cc: configdef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/configdef.mes
+config_messages.h config_messages.cc: config_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/config_messages.mes
 
-BUILT_SOURCES = configdef.h configdef.cc
+BUILT_SOURCES = config_messages.h config_messages.cc
 
 lib_LTLIBRARIES = libcfgclient.la
 libcfgclient_la_SOURCES = config_data.h config_data.cc
@@ -17,9 +17,9 @@ libcfgclient_la_SOURCES += module_spec.h module_spec.cc
 libcfgclient_la_SOURCES += ccsession.cc ccsession.h
 libcfgclient_la_SOURCES += config_log.h config_log.cc
 
-nodist_libcfgclient_la_SOURCES  = configdef.h configdef.cc
+nodist_libcfgclient_la_SOURCES  = config_messages.h config_messages.cc
 
 # The message file should be in the distribution.
-EXTRA_DIST = configdef.mes
+EXTRA_DIST = config_messages.mes
 
-CLEANFILES = *.gcno *.gcda configdef.h configdef.cc
+CLEANFILES = *.gcno *.gcda config_messages.h config_messages.cc
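
The rule above regenerates config_messages.h and config_messages.cc whenever
config_messages.mes changes, using the message compiler built in
src/lib/log/compiler.  As a rough sketch of what the generated header is
expected to provide (the exact output comes from the compiler and is only
assumed here, including the location of the MessageID type), each "% NAME text"
entry in the .mes file becomes an externally visible identifier in the file's
$NAMESPACE:

    // config_messages.h -- illustrative sketch of the generated header, not
    // the actual compiler output.
    #include <log/message_types.h>

    namespace isc {
    namespace config {

    // One identifier per "% ..." entry in config_messages.mes, for example:
    extern const isc::log::MessageID CONFIG_GET_FAIL;
    extern const isc::log::MessageID CONFIG_JSON_PARSE;
    extern const isc::log::MessageID CONFIG_OPEN_FAIL;
    // ...

    } // namespace config
    } // namespace isc
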
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index 857de63..9285940 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -247,7 +247,9 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
 } // end anonymous namespace
 
 void
-my_logconfig_handler(const std::string&n, ConstElementPtr new_config, const ConfigData& config_data) {
+default_logconfig_handler(const std::string& module_name,
+                          ConstElementPtr new_config,
+                          const ConfigData& config_data) {
     config_data.getModuleSpec().validateConfig(new_config, true);
 
     std::vector<isc::log::LoggerSpecification> specs;
@@ -272,7 +274,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
     // this file should be declared in a @something@ directive
     file.open(filename.c_str());
     if (!file) {
-        LOG_ERROR(config_logger, CONFIG_FOPEN_ERR).arg(filename).arg(strerror(errno));
+        LOG_ERROR(config_logger, CONFIG_OPEN_FAIL).arg(filename).arg(strerror(errno));
         isc_throw(CCSessionInitError, strerror(errno));
     }
 
@@ -282,7 +284,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
         LOG_ERROR(config_logger, CONFIG_JSON_PARSE).arg(filename).arg(pe.what());
         isc_throw(CCSessionInitError, pe.what());
     } catch (const ModuleSpecError& dde) {
-        LOG_ERROR(config_logger, CONFIG_MODULE_SPEC).arg(filename).arg(dde.what());
+        LOG_ERROR(config_logger, CONFIG_MOD_SPEC_FORMAT).arg(filename).arg(dde.what());
         isc_throw(CCSessionInitError, dde.what());
     }
     file.close();
@@ -332,7 +334,7 @@ ModuleCCSession::ModuleCCSession(
     int rcode;
     ConstElementPtr err = parseAnswer(rcode, answer);
     if (rcode != 0) {
-        LOG_ERROR(config_logger, CONFIG_MANAGER_MOD_SPEC).arg(answer->str());
+        LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
         isc_throw(CCSessionInitError, answer->str());
     }
     
@@ -346,14 +348,14 @@ ModuleCCSession::ModuleCCSession(
         if (rcode == 0) {
             handleConfigUpdate(new_config);
         } else {
-            LOG_ERROR(config_logger, CONFIG_MANAGER_CONFIG).arg(new_config->str());
+            LOG_ERROR(config_logger, CONFIG_GET_FAIL).arg(new_config->str());
             isc_throw(CCSessionInitError, answer->str());
         }
     }
 
     // Keep track of logging settings automatically
     if (handle_logging) {
-        addRemoteConfig("Logging", my_logconfig_handler, false);
+        addRemoteConfig("Logging", default_logconfig_handler, false);
     }
 
     if (start_immediately) {
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index 53aab78..0d4b7f3 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -354,6 +354,25 @@ private:
     ModuleSpec fetchRemoteSpec(const std::string& module, bool is_filename);
 };
 
+/// \brief Default handler for logging config updates
+///
+/// When CCSession is initialized with handle_logging set to true,
+/// this callback will be used to update the logger when a configuration
+/// change comes in.
+///
+/// This function updates the (global) loggers by initializing a
+/// LoggerManager and passing the settings as specified in the given
+/// configuration update.
+///
+/// \param module_name The name of the module
+/// \param new_config The modified configuration values
+/// \param config_data The full config data for the (remote) logging
+///                    module.
+void
+default_logconfig_handler(const std::string& module_name,
+                          isc::data::ConstElementPtr new_config,
+                          const ConfigData& config_data);
+
 }
 }
 #endif // __CCSESSION_H
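
The handler declared above is the one ModuleCCSession registers for the
"Logging" module when it is constructed with handle_logging enabled (see the
ccsession.cc hunk earlier in this diff).  A module that does not use that
convenience could subscribe explicitly with the same call the constructor
uses; a minimal sketch, assuming an already constructed ModuleCCSession named
session:

    // Sketch only: let the default handler reconfigure the global loggers
    // whenever the remote "Logging" module's configuration changes.
    session.addRemoteConfig("Logging", isc::config::default_logconfig_handler,
                            false);
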
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 22e5a5c..0063855 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -16,7 +16,7 @@
 #define __CONFIG_LOG__H
 
 #include <log/macros.h>
-#include "configdef.h"
+#include "config_messages.h"
 
 namespace isc {
 namespace config {
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
new file mode 100644
index 0000000..660ab9a
--- /dev/null
+++ b/src/lib/config/config_messages.mes
@@ -0,0 +1,59 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::config
+
+% CONFIG_CCSESSION_MSG error in CC session message: %1
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+
+% CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+
+The most likely cause of this error is a programming error.  Please raise
+a bug report.
+
+% CONFIG_GET_FAIL error getting configuration from cfgmgr: %1
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+
+% CONFIG_JSON_PARSE JSON parse error in %1: %2
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+
+% CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+
+% CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+
+% CONFIG_OPEN_FAIL error opening %1: %2
+There was an error opening the given file. The reason for the failure
+is included in the message.
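
The CONFIG_OPEN_FAIL entry above carries two placeholders, and the ccsession.cc
hunk earlier in this diff fills them when the specification file cannot be
opened.  A sketch of that call, assuming config_logger is the module's
isc::log::Logger and filename is the path that failed to open:

    // %1 becomes the file name, %2 the strerror() text, producing a log line
    // of the form "error opening <file>: <reason>".
    LOG_ERROR(config_logger, CONFIG_OPEN_FAIL).arg(filename).arg(strerror(errno));
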
diff --git a/src/lib/config/configdef.mes b/src/lib/config/configdef.mes
deleted file mode 100644
index be39073..0000000
--- a/src/lib/config/configdef.mes
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX CONFIG_
-$NAMESPACE isc::config
-
-% FOPEN_ERR     error opening %1: %2
-There was an error opening the given file.
-
-% JSON_PARSE    JSON parse error in %1: %2
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-
-% MODULE_SPEC   module specification error in %1: %2
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
-
-% MANAGER_MOD_SPEC    module specification not accepted by cfgmgr: %1
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-
-% MANAGER_CONFIG    error getting configuration from cfgmgr: %1
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
-
-% CCSESSION_MSG error in CC session message: %1
-There was a problem with an incoming message on the command and control
-channel. The message does not appear to be a valid command, and is
-missing a required element or contains an unknown data format. This
-most likely means that another BIND10 module is sending a bad message.
-The message itself is ignored by this module.
-
-% CCSESSION_MSG_INTERNAL error handling CC session message: %1
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index e028186..457d5b0 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,7 +7,7 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-CLEANFILES = *.gcno *.gcda messagedef.h messagedef.cc
+CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
 
 lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
@@ -21,15 +21,15 @@ libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
 libdatasrc_la_SOURCES += zone.h
 libdatasrc_la_SOURCES += result.h
 libdatasrc_la_SOURCES += logger.h logger.cc
-nodist_libdatasrc_la_SOURCES = messagedef.h messagedef.cc
+nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
 libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 
-BUILT_SOURCES = messagedef.h messagedef.cc
-messagedef.h messagedef.cc: Makefile messagedef.mes
-	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/messagedef.mes
+BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 
-EXTRA_DIST = messagedef.mes
+EXTRA_DIST = datasrc_messages.mes
diff --git a/src/lib/datasrc/cache.cc b/src/lib/datasrc/cache.cc
index 8e9487d..9082a6b 100644
--- a/src/lib/datasrc/cache.cc
+++ b/src/lib/datasrc/cache.cc
@@ -100,6 +100,19 @@ public:
     /// \return \c RRsetPtr
     RRsetPtr getRRset() const { return (entry->rrset); }
 
+    /// \brief Returns name associated with cached node
+    ///
+    /// This is the name associated with the RRset if it is a positive
+    /// entry, and the associated question name if the RRset is NULL
+    /// and this is a negative entry (together with an indication that
+    /// this is a negative entry).
+    string getNodeName() const {
+        if (getRRset()) {
+            return (getRRset()->getName().toText());
+        }
+        return (std::string("negative entry for ") + question.toText());
+    }
+
     /// \brief Returns the query response flags associated with the data.
     ///
     /// \return \c uint32_t
@@ -213,7 +226,7 @@ HotCacheImpl::HotCacheImpl(int slots, bool enabled) :
 inline void
 HotCacheImpl::insert(const CacheNodePtr node) {
     LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_INSERT).
-        arg(node->getRRset()->getName());
+        arg(node->getNodeName());
     std::map<Question, CacheNodePtr>::const_iterator iter;
     iter = map_.find(node->question);
     if (iter != map_.end()) {
@@ -253,7 +266,7 @@ HotCacheImpl::promote(CacheNodePtr node) {
 void
 HotCacheImpl::remove(ConstCacheNodePtr node) {
     LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_REMOVE).
-        arg(node->getRRset()->getName());
+        arg(node->getNodeName());
     lru_.erase(node->lru_entry_);
     map_.erase(node->question);
     --count_;
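
The getNodeName() accessor added above exists because negative cache entries
carry no RRset, so the logging calls can no longer dereference getRRset()
unconditionally.  The before/after pattern, shown as a sketch that mirrors the
calls in the hunks above:

    // Old form: dereferences a null RRsetPtr when the node is a negative entry.
    //     LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_INSERT).
    //         arg(node->getRRset()->getName());

    // New form: getNodeName() falls back to "negative entry for <question>"
    // when there is no RRset, so both kinds of entries log safely.
    LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_INSERT).
        arg(node->getNodeName());
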
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
new file mode 100644
index 0000000..c692364
--- /dev/null
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -0,0 +1,493 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::datasrc
+
+# \brief Messages for the data source library
+
+% DATASRC_CACHE_CREATE creating the hotspot cache
+Debug information that the hotspot cache was created at startup.
+
+% DATASRC_CACHE_DESTROY destroying the hotspot cache
+Debug information. The hotspot cache is being destroyed.
+
+% DATASRC_CACHE_DISABLE disabling the cache
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+
+% DATASRC_CACHE_ENABLE enabling the cache
+The hotspot cache is enabled from now on.
+
+% DATASRC_CACHE_EXPIRED the item '%1' is expired
+Debug information. There was an attempt to look up an item in the hotspot
+cache. The item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+
+% DATASRC_CACHE_FOUND the item '%1' was found
+Debug information. An item was successfully looked up in the hotspot cache.
+
+% DATASRC_CACHE_FULL cache is full, dropping oldest
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_INSERT inserting item '%1' into the cache
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+
+% DATASRC_CACHE_NOT_FOUND the item '%1' was not found
+Debug information. It was attempted to look up an item in the hotspot cache,
+but it is not there.
+
+% DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_REMOVE removing '%1' from the cache
+Debug information. An item is being removed from the hotspot cache.
+
+% DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. The size of 0
+means no limit.
+
+% DATASRC_DO_QUERY handling query for '%1/%2'
+Debug information. We're processing some internal query for given name and
+type.
+
+% DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
+Debug information. An RRset is being added to the in-memory data source.
+
+% DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'
+Debug information. Some special marks are needed above each '*' in a wildcard
+name; they are being added now for this name.
+
+% DATASRC_MEM_ADD_ZONE adding zone '%1/%2'
+Debug information. A zone is being added into the in-memory data source.
+
+% DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+
+% DATASRC_MEM_CNAME CNAME at the domain '%1'
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+
+% DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to CNAME.
+
+% DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+
+% DATASRC_MEM_CREATE creating zone '%1' in '%2' class
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+
+% DATASRC_MEM_DELEG_FOUND delegation found at '%1'
+Debug information. A delegation point was found above the requested record.
+
+% DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class
+Debug information. A zone from in-memory data source is being destroyed.
+
+% DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way.  This may lead to redirection to a different domain and
+stop the search.
+
+% DATASRC_MEM_DNAME_FOUND DNAME found at '%1'
+Debug information. A DNAME was found instead of the requested information.
+
+% DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
+A request was made to put DNAME and NS records into the same domain that is
+not the apex (the top of the zone). This is forbidden by RFC 2672, section 3,
+and indicates a problem with the provided data.
+
+% DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+
+% DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'
+An RRset is being inserted into in-memory data source for a second time.  The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+
+% DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'
+Debug information. There's an NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+
+% DATASRC_MEM_FIND find '%1/%2'
+Debug information. A search for the requested RRset is being started.
+
+% DATASRC_MEM_FIND_ZONE looking for zone '%1'
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+
+% DATASRC_MEM_LOAD loading zone '%1' from file '%2'
+Debug information. The content of master file is being loaded into the memory.
+
+% DATASRC_MEM_NOTFOUND requested domain '%1' not found
+Debug information. The requested domain does not exist.
+
+% DATASRC_MEM_NS_ENCOUNTERED encountered a NS
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+
+% DATASRC_MEM_NXRRSET no such type '%1' at '%2'
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+
+% DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
+An attempt was made to add the domain into a zone that shouldn't contain it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+
+% DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+
+% DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+
+% DATASRC_MEM_SUCCESS query for '%1/%2' successful
+Debug information. The requested record was found.
+
+% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested record
+type).
+
+% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual way to perform a modification in an exception-safe manner:
+the new data are prepared in a separate zone object and, once that succeeds,
+the contents are swapped. The original object then holds the new data and the
+temporary one can be safely destroyed.
+
+% DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
+Debug information. A domain above the wildcard was reached, but there's
+something below the requested domain, so the wildcard doesn't apply here. This
+behaviour is specified by RFC 1034, section 4.3.3.
+
+% DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
+The software refuses to load DNAME records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'
+The software refuses to load NS records into a wildcard domain.  It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_META_ADD adding a data source into meta data source
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+
+% DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+
+% DATASRC_META_REMOVE removing data source from meta data source
+Debug information. A data source is being removed from meta data source.
+
+% DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'
+Debug information. An NSEC record covering this zone is being added.
+
+% DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
+Debug information. An NSEC3 record for the given zone is being added to the
+response message.
+
+% DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message
+Debug information. An RRset is being added to the response message.
+
+% DATASRC_QUERY_ADD_SOA adding SOA of '%1'
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+
+% DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+
+% DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+
+% DATASRC_QUERY_CACHED data for %1/%2 found in cache
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+
+% DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'
+Debug information. While processing a query, lookup to the hotspot cache
+is being made.
+
+% DATASRC_QUERY_COPY_AUTH copying authoritative section into message
+Debug information. The whole referral information is being copied into the
+response message.
+
+% DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+
+% DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty
+There was a CNAME and it was being followed, but it contains no records, so
+there's nowhere to go and there will be no answer. This indicates a problem
+with the supplied data.
+
+% DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty
+During an attempt to synthesize a CNAME from this DNAME it was discovered that
+the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+
+% DATASRC_QUERY_FAIL query failed
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+
+% DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+
+% DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
+Debug information. While processing a query, an MX record was met. It
+references the mentioned address, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
+Debug information. While processing a query, an NS record was met. It
+references the mentioned address, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+
+% DATASRC_QUERY_INVALID_OP invalid query operation requested
+This indicates a programmer error. The DO_QUERY was called with an unknown
+operation code.
+
+% DATASRC_QUERY_IS_AUTH auth query (%1/%2)
+Debug information. The last DO_QUERY is an auth query.
+
+% DATASRC_QUERY_IS_GLUE glue query (%1/%2)
+Debug information. The last DO_QUERY is a query for glue addresses.
+
+% DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+
+% DATASRC_QUERY_IS_REF query for referral (%1/%2)
+Debug information. The last DO_QUERY is a query for referral information.
+
+% DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)
+Debug information. The last DO_QUERY is a simple query.
+
+% DATASRC_QUERY_MISPLACED_TASK task of this type should not be here
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+
+% DATASRC_QUERY_MISSING_NS missing NS records for '%1'
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA
+The answer should have been a negative one (e.g. reporting the nonexistence of
+something). To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided
+data.
+
+% DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+
+% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+
+% DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
+An attempt to add an NSEC record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
+An attempt to add an NSEC3 record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+
+% DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class
+Debug information. A sure query is being processed now.
+
+% DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
+The user wants DNSSEC and we discovered that the entity (either the domain or
+the record) doesn't exist, but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+
+% DATASRC_QUERY_REF_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+
+% DATASRC_QUERY_RRSIG unable to answer RRSIG query
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+
+% DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
+Debug information. While answering a query, a DNAME was met. The DNAME itself
+will be returned, but along with it a CNAME for clients which don't understand
+DNAMEs will be synthesized.
+
+% DATASRC_QUERY_TASK_FAIL task failed with %1
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
+
+% DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask
+This indicates a programmer error. The answer of subtask doesn't look like
+anything known.
+
+% DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+
+% DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'
+During an attempt to cover the domain by a wildcard, an error happened. The
+exact cause should have been reported already.
+
+% DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record.  The code is 1 for error and 2 for not implemented.
+
+% DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
+While processing a wildcard, a referral was met. But it wasn't possible to get
+enough information for it.  The code is 1 for error, 2 for not implemented.
+
+% DATASRC_SQLITE_CLOSE closing SQLite database
+Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CREATE SQLite data source created
+Debug information. An instance of SQLite data source is being created.
+
+% DATASRC_SQLITE_DESTROY SQLite data source destroyed
+Debug information. An instance of SQLite data source is being destroyed.
+
+% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+
+% DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+
+% DATASRC_SQLITE_FIND looking for RRset '%1/%2'
+Debug information. The SQLite data source is looking up a resource record
+set.
+
+% DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
+Debug information. The data source is looking up the addresses for given
+domain name.
+
+% DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains different class than the query was for.
+
+% DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'
+Debug information. The SQLite data source is looking up an exact resource
+record.
+
+% DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an exact RRset, but the data source
+contains different class than the query was for.
+
+% DATASRC_SQLITE_FINDREC looking for record '%1/%2'
+Debug information. The SQLite data source is looking up records of given name
+and type in the database.
+
+% DATASRC_SQLITE_FINDREF looking for referral at '%1'
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+
+% DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
+The SQLite data source was trying to identify if there's a referral. But
+it contains different class than the query was for.
+
+% DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an RRset, but the data source contains
+different class than the query was for.
+
+% DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+
+% DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+
+% DATASRC_SQLITE_OPEN opening SQLite database '%1'
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+
+% DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'
+Debug information. We're trying to look up the name preceding the supplied one.
+
+% DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
+The SQLite data source tried to identify the name preceding this one, but that
+name is not contained in any zone in the data source.
+
+% DATASRC_SQLITE_SETUP setting up SQLite database
+The database for the SQLite data source was found empty. It is assumed this is
+the first run, so it is being initialized with the current schema. It will
+still contain no data, but it will be ready for use.
+
+% DATASRC_STATIC_BAD_CLASS static data source can handle CH only
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+
+% DATASRC_STATIC_CREATE creating the static datasource
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+
+% DATASRC_STATIC_FIND looking for '%1/%2'
+Debug information. This resource record set is being looked up in the static
+data source.
+
+% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
+This indicates a programming error. An internal task of unknown type was
+generated.
+
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index 7c2828d..ac5d50b 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -16,7 +16,7 @@
 #define __DATASRC_LOGGER_H
 
 #include <log/macros.h>
-#include <datasrc/messagedef.h>
+#include <datasrc/datasrc_messages.h>
 
 /// \file logger.h
 /// \brief Data Source library global logger
diff --git a/src/lib/datasrc/messagedef.mes b/src/lib/datasrc/messagedef.mes
deleted file mode 100644
index 0374306..0000000
--- a/src/lib/datasrc/messagedef.mes
+++ /dev/null
@@ -1,494 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX DATASRC_
-$NAMESPACE isc::datasrc
-
-# \brief Messages for the data source library
-
-% CACHE_CREATE creating the hotspot cache
-Debug information that the hotspot cache was created at startup.
-
-% CACHE_DESTROY destroying the hotspot cache
-Debug information. The hotspot cache is being destroyed.
-
-% CACHE_INSERT inserting item '%1' into the cache
-Debug information. It means a new item is being inserted into the hotspot
-cache.
-
-% CACHE_OLD_FOUND older instance of cache item found, replacing
-Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_FULL cache is full, dropping oldest
-Debug information. After inserting an item into the hotspot cache, the
-maximum number of items was exceeded, so the least recently used item will
-be dropped. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_REMOVE removing '%1' from the cache
-Debug information. An item is being removed from the hotspot cache.
-
-% CACHE_NOT_FOUND the item '%1' was not found
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-
-% CACHE_FOUND the item '%1' was found
-Debug information. An item was successfully looked up in the hotspot cache.
-
-% CACHE_EXPIRED the item '%1' is expired
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
-
-% CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
-The maximum allowed number of items of the hotspot cache is set to the given
-number. If there are too many, some of them will be dropped. The size of 0
-means no limit.
-
-% CACHE_ENABLE enabling the cache
-The hotspot cache is enabled from now on.
-
-% CACHE_DISABLE disabling the cache
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-
-% QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
-
-% QUERY_EMPTY_DNAME the DNAME on '%1' is empty
-During an attempt to synthesize CNAME from this DNAME it was discovered the
-DNAME is empty (it has no records). This indicates problem with supplied data.
-
-% QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
-Debug information. While processing a query, a NS record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
-Debug information. While processing a query, a MX record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_FOLLOW_CNAME following CNAME at '%1'
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
-
-% QUERY_EMPTY_CNAME cNAME at '%1' is empty
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
-
-% QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'
-A CNAME led to another CNAME and it led to another, and so on. After 16
-CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
-might possibly be a loop as well. Note that some of the CNAMEs might have
-been synthesized from DNAMEs. This indicates problem with supplied data.
-
-% QUERY_CHECK_CACHE checking cache for '%1/%2'
-Debug information. While processing a query, lookup to the hotspot cache
-is being made.
-
-% QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for ANY queries for consistency
-reasons.
-
-% QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for authoritative ANY queries
-for consistency reasons.
-
-% DO_QUERY handling query for '%1/%2'
-Debug information. We're processing some internal query for given name and
-type.
-
-% QUERY_NO_ZONE no zone containing '%1' in class '%2'
-Lookup of domain failed because the data have no zone that contain the
-domain. Maybe someone sent a query to the wrong server for some reason.
-
-% QUERY_CACHED data for %1/%2 found in cache
-Debug information. The requested data were found in the hotspot cache, so
-no query is sent to the real data source.
-
-% QUERY_IS_SIMPLE simple query (%1/%2)
-Debug information. The last DO_QUERY is a simple query.
-
-% QUERY_IS_AUTH auth query (%1/%2)
-Debug information. The last DO_QUERY is an auth query.
-
-% QUERY_IS_GLUE glue query (%1/%2)
-Debug information. The last DO_QUERY is query for glue addresses.
-
-% QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
-Debug information. The last DO_QUERY is query for addresses that are not
-glue.
-
-% QUERY_IS_REF query for referral (%1/%2)
-Debug information. The last DO_QUERY is query for referral information.
-
-% QUERY_SIMPLE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the simple query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_AUTH_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the authoritative query. 1 means
-some error, 2 is not implemented. The data source should have logged the
-specific error already.
-
-% QUERY_GLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the glue query. 1 means some error,
-2 is not implemented. The data source should have logged the specific error
-already.
-
-% QUERY_NOGLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the no-glue query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_REF_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the query for referral information.
-1 means some error, 2 is not implemented. The data source should have logged
-the specific error already.
-
-% QUERY_INVALID_OP invalid query operation requested
-This indicates a programmer error. The DO_QUERY was called with unknown
-operation code.
-
-% QUERY_ADD_RRSET adding RRset '%1/%2' to message
-Debug information. An RRset is being added to the response message.
-
-% QUERY_COPY_AUTH copying authoritative section into message
-Debug information. The whole referral information is being copied into the
-response message.
-
-% QUERY_DELEGATION looking for delegation on the path to '%1'
-Debug information. The software is trying to identify delegation points on the
-way down to the given domain.
-
-% QUERY_ADD_SOA adding SOA of '%1'
-Debug information. A SOA record of the given zone is being added to the
-authority section of the response message.
-
-% QUERY_ADD_NSEC adding NSEC record for '%1'
-Debug information. A NSEC record covering this zone is being added.
-
-% QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
-Debug information. A NSEC3 record for the given zone is being added to the
-response message.
-
-% QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
-An attempt to add a NSEC3 record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
-An attempt to add a NSEC record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_WILDCARD looking for a wildcard covering '%1'
-Debug information. A direct match wasn't found, so a wildcard covering the
-domain is being looked for now.
-
-% QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
-While processing a wildcard, it wasn't possible to prove nonexistence of the
-given domain or record.  The code is 1 for error and 2 for not implemented.
-
-% QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
-While processing a wildcard, a referral was met. But it wasn't possible to get
-enough information for it.  The code is 1 for error, 2 for not implemented.
-
-% QUERY_PROCESS processing query '%1/%2' in the '%3' class
-Debug information. A sure query is being processed now.
-
-% QUERY_RRSIG unable to answer RRSIG query
-The server is unable to answer a direct query for RRSIG type, but was asked
-to do so.
-
-% QUERY_MISPLACED_TASK task of this type should not be here
-This indicates a programming error. A task was found in the internal task
-queue, but this kind of task wasn't designed to be inside the queue (it should
-be handled right away, not queued).
-
-% QUERY_TASK_FAIL task failed with %1
-The query subtask failed. The reason should have been reported by the subtask
-already. The code is 1 for error, 2 for not implemented.
-
-% QUERY_MISSING_NS missing NS records for '%1'
-NS records should have been put into the authority section. However, this zone
-has none. This indicates problem with provided data.
-
-% UNEXPECTED_QUERY_STATE unexpected query state
-This indicates a programming error. An internal task of unknown type was
-generated.
-
-% QUERY_FAIL query failed
-Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
-
-% QUERY_BAD_REFERRAL bad referral to '%1'
-The domain lives in another zone. But it is not possible to generate referral
-information for it.
-
-% QUERY_WILDCARD_FAIL error processing wildcard for '%1'
-During an attempt to cover the domain by a wildcard an error happened. The
-exact kind was hopefully already reported.
-
-% QUERY_MISSING_SOA the zone '%1' has no SOA
-The answer should have been a negative one (eg. of nonexistence of something).
-To do so, a SOA record should be put into the authority section, but the zone
-does not have one. This indicates problem with provided data.
-
-% QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
-The user wants DNSSEC and we discovered the entity doesn't exist (either
-domain or the record). But there was an error getting NSEC/NSEC3 record
-to prove the nonexistence.
-
-% QUERY_UNKNOWN_RESULT unknown result of subtask
-This indicates a programmer error. The answer of subtask doesn't look like
-anything known.
-
-% META_ADD adding a data source into meta data source
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
-
-% META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
-It was attempted to add a data source into a meta data source. But their
-classes do not match.
-
-% META_REMOVE removing data source from meta data source
-Debug information. A data source is being removed from meta data source.
-
-% MEM_ADD_WILDCARD adding wildcards for '%1'
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
-
-% MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
-Someone or something tried to add a CNAME into a domain that already contains
-some other data. But the protocol forbids coexistence of CNAME with anything
-(RFC 1034, section 3.6.2). This indicates a problem with provided data.
-
-% MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
-This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
-
-% MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
-
-% MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
-Some resource types are singletons -- only one is allowed in a domain
-(for example CNAME or SOA). This indicates a problem with provided data.
-
-% MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
-It was attempted to add the domain into a zone that shouldn't have it
-(eg. the domain is not subdomain of the zone origin). This indicates a
-problem with provided data.
-
-% MEM_WILDCARD_NS nS record in wildcard domain '%1'
-The software refuses to load NS records into a wildcard domain.  It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'
-The software refuses to load DNAME records into a wildcard domain.  It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
-Debug information. An RRset is being added to the in-memory data source.
-
-% MEM_DUP_RRSET duplicate RRset '%1/%2'
-An RRset is being inserted into in-memory data source for a second time.  The
-original version must be removed first. Note that loading master files where an
-RRset is split into multiple locations is not supported yet.
-
-% MEM_DNAME_ENCOUNTERED encountered a DNAME
-Debug information. While searching for the requested domain, a DNAME was
-encountered on the way.  This may lead to redirection to a different domain and
-stop the search.
-
-% MEM_NS_ENCOUNTERED encountered a NS
-Debug information. While searching for the requested domain, a NS was
-encountered on the way (a delegation). This may lead to stop of the search.
-
-% MEM_RENAME renaming RRset from '%1' to '%2'
-Debug information. A RRset is being generated from a different RRset (most
-probably a wildcard). So it must be renamed to whatever the user asked for. In
-fact, it's impossible to rename RRsets with our libraries, so a new one is
-created and all resource records are copied over.
-
-% MEM_FIND find '%1/%2'
-Debug information. A search for the requested RRset is being started.
-
-% MEM_DNAME_FOUND DNAME found at '%1'
-Debug information. A DNAME was found instead of the requested information.
-
-% MEM_DELEG_FOUND delegation found at '%1'
-Debug information. A delegation point was found above the requested record.
-
-% MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
-Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated  as NXRRSET
-case (eg. the domain exists, but it doesn't have the requested record type).
-
-% MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
-Debug information. A domain above wildcard was reached, but there's something
-below the requested domain. Therefore the wildcard doesn't apply here.  This
-behaviour is specified by RFC 1034, section 4.3.3
-
-% MEM_NOTFOUND requested domain '%1' not found
-Debug information. The requested domain does not exist.
-
-% MEM_DOMAIN_EMPTY requested domain '%1' is empty
-Debug information. The requested domain exists in the tree of domains, but
-it is empty. Therefore it doesn't contain the requested resource type.
-
-% MEM_EXACT_DELEGATION delegation at the exact domain '%1'
-Debug information. There's a NS record at the requested domain. This means
-this zone is not authoritative for the requested domain, but a delegation
-should be followed. The requested domain is an apex of some zone.
-
-% MEM_ANY_SUCCESS ANY query for '%1' successful
-Debug information. The domain was found and an ANY type query is being answered
-by providing everything found inside the domain.
-
-% MEM_SUCCESS query for '%1/%2' successful
-Debug information. The requested record was found.
-
-% MEM_CNAME CNAME at the domain '%1'
-Debug information. The requested domain is an alias to a different domain,
-returning the CNAME instead.
-
-% MEM_NXRRSET no such type '%1' at '%2'
-Debug information. The domain exists, but it doesn't hold any record of the
-requested type.
-
-% MEM_CREATE creating zone '%1' in '%2' class
-Debug information. A representation of a zone for the in-memory data source is
-being created.
-
-% MEM_DESTROY destroying zone '%1' in '%2' class
-Debug information. A zone from in-memory data source is being destroyed.
-
-% MEM_LOAD loading zone '%1' from file '%2'
-Debug information. The content of the master file is being loaded into memory.
-
-% MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
-Debug information. The contents of two in-memory zones are being exchanged.
-This is a common way to perform an update in an exception-safe manner -- the
-new data are prepared in a separate zone object and, once that succeeds, the
-contents are swapped. The original object then holds the new data, and the
-temporary object, now holding the old data, can be safely destroyed.
-
-% MEM_ADD_ZONE adding zone '%1/%2'
-Debug information. A zone is being added into the in-memory data source.
-
-% MEM_FIND_ZONE looking for zone '%1'
-Debug information. A zone object for this zone is being searched for in the
-in-memory data source.
-
-% STATIC_CREATE creating the static datasource
-Debug information. The static data source (the one holding stuff like
-version.bind) is being created.
-
-% STATIC_BAD_CLASS static data source can handle CH only
-For some reason, someone asked the static data source a query that is not in
-the CH class.
-
-% STATIC_FIND looking for '%1/%2'
-Debug information. This resource record set is being looked up in the static
-data source.
-
-% SQLITE_FINDREC looking for record '%1/%2'
-Debug information. The SQLite data source is looking up records of the given
-name and type in the database.
-
-% SQLITE_ENCLOSURE looking for zone containing '%1'
-Debug information. The SQLite data source is trying to identify which zone
-should hold this domain.
-
-% SQLITE_ENCLOSURE_NOTFOUND no zone contains it
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
-no such zone in our data.
-
-% SQLITE_PREVIOUS looking for name previous to '%1'
-Debug information. We're trying to look up the name preceding the supplied one.
-
-% SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
-The SQLite data source tried to identify the name preceding this one, but the
-name is not contained in any zone in the data source.
-
-% SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
-Debug information. We're trying to look up an NSEC3 record in the SQLite data
-source.
-
-% SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
-The SQLite data source was asked to provide an NSEC3 record for the given
-zone, but it does not contain that zone.
-
-% SQLITE_FIND looking for RRset '%1/%2'
-Debug information. The SQLite data source is looking up a resource record
-set.
-
-% SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an RRset, but the data source contains
-a different class than the query was for.
-
-% SQLITE_FINDEXACT looking for exact RRset '%1/%2'
-Debug information. The SQLite data source is looking up an exact resource
-record.
-
-% SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an exact RRset, but the data source
-contains a different class than the query was for.
-
-% SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
-Debug information. The data source is looking up the addresses for the given
-domain name.
-
-% SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
-The SQLite data source was looking up A/AAAA addresses, but the data source
-contains a different class than the query was for.
-
-% SQLITE_FINDREF looking for referral at '%1'
-Debug information. The SQLite data source is determining whether this domain
-is a referral and, if so, where it points.
-
-% SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
-The SQLite data source was trying to identify whether there's a referral, but
-the data source contains a different class than the query was for.
-
-% SQLITE_CREATE sQLite data source created
-Debug information. An instance of SQLite data source is being created.
-
-% SQLITE_DESTROY sQLite data source destroyed
-Debug information. An instance of SQLite data source is being destroyed.
-
-% SQLITE_SETUP setting up SQLite database
-The database for the SQLite data source was found to be empty. It is assumed
-this is the first run, so it is being initialized with the current schema.
-It will still contain no data, but it will be ready for use.
-
-% SQLITE_OPEN opening SQLite database '%1'
-Debug information. The SQLite data source is loading an SQLite database from
-the provided file.
-
-% SQLITE_CLOSE closing SQLite database
-Debug information. The SQLite data source is closing the database file.
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index aa9d062..6c4ef54 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -16,6 +16,9 @@ pydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
 pydnspp_la_SOURCES += tsig_python.cc tsig_python.h
 
 pydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+pydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 pydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
 
 # directly included from source files, so these don't have their own
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index eb0c136..61d7df6 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -41,3 +41,8 @@ endif
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index a42037b..d0f1d74 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -163,6 +163,17 @@ public:
         oss__ << stream; \
         throw type(__FILE__, __LINE__, oss__.str().c_str()); \
     } while (1)
+
+///
+/// Similar to isc_throw, but allows the exception to take one additional
+/// parameter (the stream/text goes first).
+#define isc_throw_1(type, stream, param1) \
+    do { \
+        std::ostringstream oss__; \
+        oss__ << stream; \
+        throw type(__FILE__, __LINE__, oss__.str().c_str(), param1); \
+    } while (1)
+
 }
 #endif // __EXCEPTIONS_H
 
diff --git a/src/lib/log/compiler/message.cc b/src/lib/log/compiler/message.cc
index 155aa55..53a24ee 100644
--- a/src/lib/log/compiler/message.cc
+++ b/src/lib/log/compiler/message.cc
@@ -266,12 +266,12 @@ writePythonFile(const string& file, MessageDictionary& dictionary) {
         "# File created from " << message_file.fullName() << " on " <<
             currentTime() << "\n" <<
         "\n" <<
-        "import isc.log.message\n" <<
+        "import isc.log\n" <<
         "\n";
 
     vector<string> idents(sortedIdentifiers(dictionary));
     BOOST_FOREACH(const string& ident, idents) {
-        pyfile << ident << " = isc.log.message.create(\"" <<
+        pyfile << ident << " = isc.log.create_message(\"" <<
             ident << "\", \"" << quoteString(dictionary.getText(ident)) <<
             "\")\n";
     }
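
For reference, with the change above the message compiler now emits Python
message modules that import isc.log directly and register each message through
isc.log.create_message().  A generated module would look roughly like this
(sketch only; the identifier and text are borrowed from the example messages
used in the test scripts further below):

    # File created from example.msg (illustrative)
    import isc.log

    MSG_OPENIN = isc.log.create_message("MSG_OPENIN",
        "unable to open message file %1 for input: %2")
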
diff --git a/src/lib/log/logger.cc b/src/lib/log/logger.cc
index 5a35d36..d10e979 100644
--- a/src/lib/log/logger.cc
+++ b/src/lib/log/logger.cc
@@ -18,6 +18,7 @@
 #include <log/logger.h>
 #include <log/logger_impl.h>
 #include <log/logger_name.h>
+#include <log/logger_support.h>
 #include <log/message_dictionary.h>
 #include <log/message_types.h>
 
@@ -28,9 +29,14 @@ using namespace std;
 namespace isc {
 namespace log {
 
-// Initialize Logger.
+// Initialize underlying logger, but only if logging has been initialized.
 void Logger::initLoggerImpl() {
-    loggerptr_ = new LoggerImpl(name_);
+    if (isLoggingInitialized()) {
+        loggerptr_ = new LoggerImpl(name_);
+    } else {
+        isc_throw(LoggingNotInitialized, "attempt to access logging function "
+                  "before logging has been initialized");
+    }
 }
 
 // Destructor.
diff --git a/src/lib/log/logger.h b/src/lib/log/logger.h
index 378db10..96168c0 100644
--- a/src/lib/log/logger.h
+++ b/src/lib/log/logger.h
@@ -18,6 +18,7 @@
 #include <cstdlib>
 #include <string>
 
+#include <exceptions/exceptions.h>
 #include <log/logger_level.h>
 #include <log/message_types.h>
 #include <log/log_formatter.h>
@@ -73,6 +74,17 @@ namespace log {
 
 class LoggerImpl;   // Forward declaration of the implementation class
 
+/// \brief Logging Not Initialized
+///
+/// Exception thrown if an attempt is made to access a logging function
+/// if the logging system has not been initialized.
+class LoggingNotInitialized : public isc::Exception {
+public:
+    LoggingNotInitialized(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what)
+    {}
+};
+
 /// \brief Logger Class
 ///
 /// This class is the main class used for logging.  Use comprises:
@@ -224,15 +236,14 @@ private:
 
     /// \brief Initialize Implementation
     ///
-    /// Returns the logger pointer.  If not yet set, the underlying
-    /// implementation class is initialized.\n
-    /// \n
-    /// The reason for this indirection is to avoid the "static initialization
-    /// fiasco", whereby we cannot rely on the order of static initializations.
-    /// The main problem is the root logger name - declared statically - which
-    /// is referenced by various loggers.  By deferring a reference to it until
-    /// after the program starts executing - by which time the root name object
-    /// will be initialized - we avoid this problem.
+    /// Returns the logger pointer.  If not yet set, the implementation class is
+    /// initialized.
+    ///
+    /// The main reason for this is to allow loggers to be declared statically
+    /// before the underlying logging system is initialized.  However, any
+    /// attempt to access a logging method on any logger before initialization -
+    /// regardless of whether it is statically or automatically declared - will
+    /// cause a "LoggingNotInitialized" exception to be thrown.
     ///
     /// \return Returns pointer to implementation
     LoggerImpl* getLoggerPtr() {
diff --git a/src/lib/log/logger_impl.cc b/src/lib/log/logger_impl.cc
index cbf2f1f..046da13 100644
--- a/src/lib/log/logger_impl.cc
+++ b/src/lib/log/logger_impl.cc
@@ -98,7 +98,7 @@ LoggerImpl::getEffectiveDebugLevel() {
 // Output a general message
 string*
 LoggerImpl::lookupMessage(const MessageID& ident) {
-    return (new string(string(ident) + ", " +
+    return (new string(string(ident) + " " +
                        MessageDictionary::globalDictionary().getText(ident)));
 }
 
diff --git a/src/lib/log/logger_manager.cc b/src/lib/log/logger_manager.cc
index 9955a83..4d56e4b 100644
--- a/src/lib/log/logger_manager.cc
+++ b/src/lib/log/logger_manager.cc
@@ -15,10 +15,11 @@
 #include <algorithm>
 #include <vector>
 
-#include <log/logger.h>
+#include <log/logger_level.h>
 #include <log/logger_manager_impl.h>
 #include <log/logger_manager.h>
 #include <log/logger_name.h>
+#include <log/logger_support.h>
 #include <log/messagedef.h>
 #include <log/message_dictionary.h>
 #include <log/message_exception.h>
@@ -110,6 +111,7 @@ LoggerManager::init(const std::string& root, isc::log::Severity severity,
     // Initialize the implementation logging.  After this point, some basic
     // logging has been set up and messages can be logged.
     LoggerManagerImpl::init(severity, dbglevel);
+    setLoggingInitialized();
 
     // Check if there were any duplicate message IDs in the default dictionary
     // and if so, log them.  Log using the logging facility logger.
diff --git a/src/lib/log/logger_support.cc b/src/lib/log/logger_support.cc
index 78e28f3..73323a0 100644
--- a/src/lib/log/logger_support.cc
+++ b/src/lib/log/logger_support.cc
@@ -29,20 +29,37 @@
 #include <iostream>
 #include <string>
 
-#include <log/logger.h>
+#include <log/logger_level.h>
 #include <log/logger_manager.h>
 #include <log/logger_support.h>
 
+using namespace std;
+
+namespace {
+
+// Flag to hold logging initialization state.
+bool logging_init_state = false;
+
+} // Anonymous namespace
+
 namespace isc {
 namespace log {
 
-using namespace std;
+// Return initialization state.
+bool
+isLoggingInitialized() {
+    return (logging_init_state);
+}
 
-// Declare a logger for the logging subsystem.  This is a sub-logger of the
-// root logger and is used in all functions in this file.
-Logger logger("log");
+// Set initialization state.  (Note: as logging can be initialized via a direct
+// call to LoggerManager::init(), this function is called from there, not from
+// the initialization functions in this file.)
+void
+setLoggingInitialized(bool state) {
+    logging_init_state = state;
+}
 
-/// Logger Run-Time Initialization
+// Logger Run-Time Initialization.
 
 void
 initLogger(const string& root, isc::log::Severity severity, int dbglevel,
@@ -50,7 +67,7 @@ initLogger(const string& root, isc::log::Severity severity, int dbglevel,
     LoggerManager::init(root, severity, dbglevel, file);
 }
 
-/// Logger Run-Time Initialization via Environment Variables
+// Logger Run-Time Initialization via Environment Variables
 void initLogger(isc::log::Severity severity, int dbglevel) {
 
     // Root logger name is defined by the environment variable B10_LOGGER_ROOT.
@@ -79,20 +96,20 @@ void initLogger(isc::log::Severity severity, int dbglevel) {
             try {
                 level = boost::lexical_cast<int>(dbg_char);
                 if (level < MIN_DEBUG_LEVEL) {
-                    std::cerr << "**ERROR** debug level of " << level
-                              << " is invalid - a value of " << MIN_DEBUG_LEVEL
-                              << " will be used\n";
+                    cerr << "**ERROR** debug level of " << level
+                         << " is invalid - a value of " << MIN_DEBUG_LEVEL
+                         << " will be used\n";
                     level = MIN_DEBUG_LEVEL;
                 } else if (level > MAX_DEBUG_LEVEL) {
-                    std::cerr << "**ERROR** debug level of " << level
-                              << " is invalid - a value of " << MAX_DEBUG_LEVEL
-                              << " will be used\n";
+                    cerr << "**ERROR** debug level of " << level
+                         << " is invalid - a value of " << MAX_DEBUG_LEVEL
+                         << " will be used\n";
                     level = MAX_DEBUG_LEVEL;
                 }
             } catch (...) {
                 // Error, but not fatal to the test
-                std::cerr << "**ERROR** Unable to translate "
-                             "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
+                cerr << "**ERROR** Unable to translate "
+                        "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
             }
             dbglevel = level;
         }
diff --git a/src/lib/log/logger_support.h b/src/lib/log/logger_support.h
index 5d574d3..4bc8acc 100644
--- a/src/lib/log/logger_support.h
+++ b/src/lib/log/logger_support.h
@@ -23,6 +23,26 @@
 namespace isc {
 namespace log {
 
+/// \brief Is logging initialized?
+///
+/// As some underlying logging implementations can behave unpredictably if they
+/// have not been initialized when a logging function is called, their
+/// initialization state is tracked.  The logger functions will check this flag
+/// and throw an exception if logging is not initialized at that point.
+///
+/// \return true if logging has been initialized, false if not
+bool isLoggingInitialized();
+
+/// \brief Set "logging initialized" flag
+///
+/// Sets the state of the "logging initialized" flag.
+///
+/// \param state State to set the flag to. (This is expected to be "true" - the
+///        default - for all code apart from specific unit tests.)
+void setLoggingInitialized(bool state = true);
+
+
+
 /// \brief Run-Time Initialization
 ///
 /// Performs run-time initialization of the logger in particular supplying:
@@ -70,7 +90,9 @@ void initLogger(const std::string& root,
 ///
 /// Any errors in the settings cause messages to be output to stderr.
 ///
-/// This function is most likely to be called from unit test programs.
+/// This function is aimed at test programs, allowing the default settings to
+/// be overridden by the tester.  It is not intended for use in production
+/// code.
 
 void initLogger(isc::log::Severity severity = isc::log::INFO,
                 int dbglevel = 0);
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index 93703c1..cd2ae29 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -19,6 +19,7 @@ run_unittests_SOURCES += logger_level_impl_unittest.cc
 run_unittests_SOURCES += logger_level_unittest.cc
 run_unittests_SOURCES += logger_manager_unittest.cc
 run_unittests_SOURCES += logger_name_unittest.cc
+run_unittests_SOURCES += logger_support_unittest.cc
 run_unittests_SOURCES += logger_unittest.cc
 run_unittests_SOURCES += logger_specification_unittest.cc
 run_unittests_SOURCES += message_dictionary_unittest.cc
diff --git a/src/lib/log/tests/destination_test.sh.in b/src/lib/log/tests/destination_test.sh.in
index ff2d3fb..e02141d 100755
--- a/src/lib/log/tests/destination_test.sh.in
+++ b/src/lib/log/tests/destination_test.sh.in
@@ -37,10 +37,10 @@ passfail() {
 
 echo "1. One logger, multiple destinations:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
 .
 rm -f $destfile1 $destfile2
 ./logger_example -s error -f $destfile1 -f $destfile2
@@ -61,13 +61,13 @@ rm -f $destfile1 $destfile2
 # Output for example.alpha should have done to destfile2.
 
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
-INFO  [example.beta] MSG_READERR, error reading from message file beta: info
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+INFO  [example.beta] MSG_READERR error reading from message file beta: info
 .
 echo -n  "   - destination 1:"
 cut -d' ' -f3- $destfile1 | diff $tempfile -
@@ -75,7 +75,7 @@ passfail $?
 
 echo -n  "   - destination 2:"
 cat > $tempfile << .
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
 .
 cut -d' ' -f3- $destfile2 | diff $tempfile -
 passfail $?
diff --git a/src/lib/log/tests/local_file_test.sh.in b/src/lib/log/tests/local_file_test.sh.in
index 53b0c2f..4308f96 100755
--- a/src/lib/log/tests/local_file_test.sh.in
+++ b/src/lib/log/tests/local_file_test.sh.in
@@ -37,36 +37,35 @@ passfail() {
 # Create the local message file for testing
 
 cat > $localmes << .
-\$PREFIX MSG_
-% NOTHERE     this message is not in the global dictionary
-% READERR     replacement read error, parameters: '%1' and '%2'
-% RDLOCMES    replacement read local message file, parameter is '%1'
+% MSG_NOTHERE     this message is not in the global dictionary
+% MSG_READERR     replacement read error, parameters: '%1' and '%2'
+% MSG_RDLOCMES    replacement read local message file, parameter is '%1'
 .
 
 echo -n "1. Local message replacement:"
 cat > $tempfile << .
-WARN  [example.log] MSG_IDNOTFND, could not replace message text for 'MSG_NOTHERE': no such message
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, replacement read local message file, parameter is 'dummy/file'
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, replacement read error, parameters: 'a.txt' and 'dummy reason'
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
+WARN  [example.log] MSG_IDNOTFND could not replace message text for 'MSG_NOTHERE': no such message
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES replacement read local message file, parameter is 'dummy/file'
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR replacement read error, parameters: 'a.txt' and 'dummy reason'
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
 .
 ./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
 passfail $?
 
 echo -n "2. Report error if unable to read local message file:"
 cat > $tempfile << .
-ERROR [example.log] MSG_OPENIN, unable to open message file $localmes for input: No such file or directory
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
+ERROR [example.log] MSG_OPENIN unable to open message file $localmes for input: No such file or directory
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
 .
 rm -f $localmes
 ./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
diff --git a/src/lib/log/tests/logger_support_unittest.cc b/src/lib/log/tests/logger_support_unittest.cc
new file mode 100644
index 0000000..7e5d23a
--- /dev/null
+++ b/src/lib/log/tests/logger_support_unittest.cc
@@ -0,0 +1,72 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <log/messagedef.h>
+
+using namespace isc::log;
+
+// Check that the initialized flag can be manipulated.  This is a bit
+// chicken-and-egg: we want to reset the flag to its original value at the end
+// of the test, so use the functions to do that.  But we are trying to check
+// that these functions in fact work.
+
+TEST(LoggerSupportTest, InitializedFlag) {
+    bool current_flag = isLoggingInitialized();
+
+    // check we can flip the flag.
+    setLoggingInitialized(!current_flag);
+    EXPECT_NE(current_flag, isLoggingInitialized());
+    setLoggingInitialized(!isLoggingInitialized());
+    EXPECT_EQ(current_flag, isLoggingInitialized());
+
+    // Check we can set it to explicit values (tests that a call to the "set"
+    // function does not just flip the flag).
+    setLoggingInitialized(false);
+    EXPECT_FALSE(isLoggingInitialized());
+    setLoggingInitialized(false);
+    EXPECT_FALSE(isLoggingInitialized());
+
+    setLoggingInitialized(true);
+    EXPECT_TRUE(isLoggingInitialized());
+    setLoggingInitialized(true);
+    EXPECT_TRUE(isLoggingInitialized());
+
+    // Reset to original value
+    setLoggingInitialized(current_flag);
+}
+
+// Check that a logger will throw an exception if logging has not been
+// initialized.
+
+TEST(LoggerSupportTest, LoggingInitializationCheck) {
+
+    // Assert that logging has been initialized (it should be in main()).
+    bool current_flag = isLoggingInitialized();
+    EXPECT_TRUE(current_flag);
+
+    // Flag that it has not been initialized and declare a logger. Any logging
+    // operation should then throw.
+    setLoggingInitialized(false);
+    isc::log::Logger test_logger("test");
+
+    EXPECT_THROW(test_logger.isDebugEnabled(), LoggingNotInitialized);
+    EXPECT_THROW(test_logger.info(MSG_OPENIN), LoggingNotInitialized);
+
+    // ... and check that they work when logging is initialized.
+    setLoggingInitialized(true);
+    EXPECT_NO_THROW(test_logger.isDebugEnabled());
+    EXPECT_NO_THROW(test_logger.info(MSG_OPENIN));
+}
diff --git a/src/lib/log/tests/severity_test.sh.in b/src/lib/log/tests/severity_test.sh.in
index 6f4d27a..0a304e0 100755
--- a/src/lib/log/tests/severity_test.sh.in
+++ b/src/lib/log/tests/severity_test.sh.in
@@ -35,44 +35,44 @@ passfail() {
 
 echo -n "1. runInitTest default parameters:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
-INFO  [example.alpha] MSG_OPENIN, unable to open message file example.msg for input: dummy reason
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
-INFO  [example.beta] MSG_READERR, error reading from message file beta: info
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+INFO  [example.alpha] MSG_OPENIN unable to open message file example.msg for input: dummy reason
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+INFO  [example.beta] MSG_READERR error reading from message file beta: info
 .
 ./logger_example -c stdout | cut -d' ' -f3- | diff $tempfile -
 passfail $?
 
 echo -n "2. Severity filter:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
 .
 ./logger_example -c stdout -s error | cut -d' ' -f3- | diff $tempfile -
 passfail $?
 
 echo -n "3. Debug level:"
 cat > $tempfile << .
-FATAL [example] MSG_WRITERR, error writing to test1: 42
-ERROR [example] MSG_RDLOCMES, reading local message file dummy/file
-WARN  [example] MSG_BADSTREAM, bad log console output stream: example
-WARN  [example.alpha] MSG_READERR, error reading from message file a.txt: dummy reason
-INFO  [example.alpha] MSG_OPENIN, unable to open message file example.msg for input: dummy reason
-DEBUG [example] MSG_RDLOCMES, reading local message file example/0
-DEBUG [example] MSG_RDLOCMES, reading local message file example/24
-DEBUG [example] MSG_RDLOCMES, reading local message file example/25
-FATAL [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION, unrecognized log destination: beta_error
-WARN  [example.beta] MSG_BADSTREAM, bad log console output stream: beta_warn
-INFO  [example.beta] MSG_READERR, error reading from message file beta: info
-DEBUG [example.beta] MSG_BADSEVERITY, unrecognized log severity: beta/25
+FATAL [example] MSG_WRITERR error writing to test1: 42
+ERROR [example] MSG_RDLOCMES reading local message file dummy/file
+WARN  [example] MSG_BADSTREAM bad log console output stream: example
+WARN  [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+INFO  [example.alpha] MSG_OPENIN unable to open message file example.msg for input: dummy reason
+DEBUG [example] MSG_RDLOCMES reading local message file example/0
+DEBUG [example] MSG_RDLOCMES reading local message file example/24
+DEBUG [example] MSG_RDLOCMES reading local message file example/25
+FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+WARN  [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+INFO  [example.beta] MSG_READERR error reading from message file beta: info
+DEBUG [example.beta] MSG_BADSEVERITY unrecognized log severity: beta/25
 .
 ./logger_example -c stdout -s debug -d 25 | cut -d' ' -f3- | diff $tempfile -
 passfail $?
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index 75e5afb..5924294 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -11,3 +11,7 @@ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in bind10_config.py.in
 EXTRA_DIST =  bind10_config.py.in
 
 CLEANFILES = bind10_config.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index 7a54909..bfc5a91 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = datasrc cc config log net notify util testutils
 python_PYTHON = __init__.py
 
 pythondir = $(pyexecdir)/isc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 9204792..8fcbf42 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -2,4 +2,3 @@ import isc.datasrc
 import isc.cc
 import isc.config
 #import isc.dns
-import isc.log
diff --git a/src/lib/python/isc/cc/Makefile.am b/src/lib/python/isc/cc/Makefile.am
index a2246db..b0ba3b2 100644
--- a/src/lib/python/isc/cc/Makefile.am
+++ b/src/lib/python/isc/cc/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
 python_PYTHON =	__init__.py data.py session.py message.py
 
 pythondir = $(pyexecdir)/isc/cc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index dc19758..4e49501 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -6,6 +6,13 @@ EXTRA_DIST = $(PYTESTS)
 EXTRA_DIST += sendcmd.py
 EXTRA_DIST += test_session.py
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -15,6 +22,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
 	BIND10_TEST_SOCKET_FILE=$(builddir)/test_socket.sock \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index 916a522..516d069 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
 python_PYTHON = __init__.py ccsession.py cfgmgr.py config_data.py module_spec.py
 
 pythondir = $(pyexecdir)/isc/config
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 84809f1..0869160 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -39,6 +39,10 @@
 from isc.cc import Session
 from isc.config.config_data import ConfigData, MultiConfigData, BIND10_CONFIG_DATA_VERSION
 import isc
+from isc.util.file import path_search
+import bind10_config
+from isc.log import log_config_update
+import json
 
 class ModuleCCSessionError(Exception): pass
 
@@ -116,6 +120,18 @@ def create_command(command_name, params = None):
     msg = { 'command': cmd }
     return msg
 
+def default_logconfig_handler(new_config, config_data):
+    errors = []
+
+    if config_data.get_module_spec().validate_config(False, new_config, errors):
+        isc.log.log_config_update(json.dumps(new_config),
+            json.dumps(config_data.get_module_spec().get_full_spec()))
+    else:
+        # no logging here yet, TODO: log these errors
+        print("Error in logging configuration, ignoring config update: ")
+        for err in errors:
+            print(err)
+
 class ModuleCCSession(ConfigData):
     """This class maintains a connection to the command channel, as
        well as configuration options for modules. The module provides
@@ -126,7 +142,7 @@ class ModuleCCSession(ConfigData):
        callbacks are called when 'check_command' is called on the
        ModuleCCSession"""
        
-    def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
+    def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None, handle_logging_config = False):
         """Initialize a ModuleCCSession. This does *NOT* send the
            specification and request the configuration yet. Use start()
            for that once the ModuleCCSession has been initialized.
@@ -149,6 +165,11 @@ class ModuleCCSession(ConfigData):
         self._session.group_subscribe(self._module_name, "*")
 
         self._remote_module_configs = {}
+        self._remote_module_callbacks = {}
+
+        if handle_logging_config:
+            self.add_remote_config(path_search('logging.spec', bind10_config.PLUGIN_PATHS),
+                                   default_logconfig_handler)
 
     def __del__(self):
         # If the CC Session object has been closed, it returns
@@ -218,6 +239,9 @@ class ModuleCCSession(ConfigData):
                             newc = self._remote_module_configs[module_name].get_local_config()
                             isc.cc.data.merge(newc, new_config)
                             self._remote_module_configs[module_name].set_local_config(newc)
+                            if self._remote_module_callbacks[module_name] is not None:
+                                self._remote_module_callbacks[module_name](new_config,
+                                                                           self._remote_module_configs[module_name])
                         # For other modules, we're not supposed to answer
                         return
 
@@ -260,7 +284,7 @@ class ModuleCCSession(ConfigData):
            and return an answer created with create_answer()"""
         self._command_handler = command_handler
 
-    def add_remote_config(self, spec_file_name):
+    def add_remote_config(self, spec_file_name, config_update_callback = None):
         """Gives access to the configuration of a different module.
            These remote module options can at this moment only be
            accessed through get_remote_config_value(). This function
@@ -289,9 +313,12 @@ class ModuleCCSession(ConfigData):
             if rcode == 0:
                 if value != None and module_spec.validate_config(False, value):
                     module_cfg.set_local_config(value);
+                    if config_update_callback is not None:
+                        config_update_callback(value, module_cfg)
 
         # all done, add it
         self._remote_module_configs[module_name] = module_cfg
+        self._remote_module_callbacks[module_name] = config_update_callback
         return module_name
         
     def remove_remote_config(self, module_name):
@@ -299,6 +326,7 @@ class ModuleCCSession(ConfigData):
         if module_name in self._remote_module_configs:
             self._session.group_unsubscribe(module_name)
             del self._remote_module_configs[module_name]
+            del self._remote_module_callbacks[module_name]
 
     def get_remote_config_value(self, module_name, identifier):
         """Returns the current setting for the given identifier at the
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 622b23c..b5f5501 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -4,6 +4,13 @@ PYTESTS += module_spec_test.py
 EXTRA_DIST = $(PYTESTS)
 EXTRA_DIST += unittest_fakesession.py
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -13,8 +20,15 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
 	CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
 	CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index a0cafd6..830cbd7 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -22,6 +22,7 @@ import os
 from isc.config.ccsession import *
 from isc.config.config_data import BIND10_CONFIG_DATA_VERSION
 from unittest_fakesession import FakeModuleCCSession, WouldBlockForever
+import bind10_config
 
 class TestHelperFunctions(unittest.TestCase):
     def test_parse_answer(self):
@@ -604,7 +605,43 @@ class TestModuleCCSession(unittest.TestCase):
         self.assertEqual(len(fake_session.message_queue), 1)
         mccs.check_command()
         self.assertEqual(len(fake_session.message_queue), 0)
-        
+
+    def test_logconfig_handler(self):
+        # test whether default_logconfig_handler reacts nicely to
+        # bad data. We assume the actual logger output is tested
+        # elsewhere
+        self.assertRaises(TypeError, default_logconfig_handler)
+        self.assertRaises(TypeError, default_logconfig_handler, 1)
+
+        spec = isc.config.module_spec_from_file(
+            path_search('logging.spec', bind10_config.PLUGIN_PATHS))
+        config_data = ConfigData(spec)
+
+        self.assertRaises(TypeError, default_logconfig_handler, 1, config_data)
+
+        default_logconfig_handler({}, config_data)
+
+        # Wrong data should not raise, but simply not be accepted
+        # This would log a lot of errors, so we may want to suppress that later
+        default_logconfig_handler({ "bad_data": "indeed" }, config_data)
+        default_logconfig_handler({ "bad_data": 1}, config_data)
+        default_logconfig_handler({ "bad_data": 1123 }, config_data)
+        default_logconfig_handler({ "bad_data": True }, config_data)
+        default_logconfig_handler({ "bad_data": False }, config_data)
+        default_logconfig_handler({ "bad_data": 1.1 }, config_data)
+        default_logconfig_handler({ "bad_data": [] }, config_data)
+        default_logconfig_handler({ "bad_data": [[],[],[[1, 3, False, "foo" ]]] },
+                                  config_data)
+        default_logconfig_handler({ "bad_data": [ 1, 2, { "b": { "c": "d" } } ] },
+                                  config_data)
+
+        # Try a correct config
+        log_conf = {"loggers":
+                       [{"name": "b10-xfrout", "output_options":
+                           [{"output": "/tmp/bind10.log",
+                                       "destination": "file",
+                                       "flush": True}]}]}
+        default_logconfig_handler(log_conf, config_data)
 
 class fakeData:
     def decode(self):
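
The new test above exercises default_logconfig_handler directly.  In a running
module the handler is wired up automatically: passing the new
handle_logging_config flag to ModuleCCSession makes the constructor register
default_logconfig_handler as the callback for the logging specification.  A
minimal sketch (the spec file name and the two handlers are hypothetical):

    from isc.config.ccsession import ModuleCCSession, create_answer

    def my_config_handler(new_config):       # hypothetical module handler
        return create_answer(0)

    def my_command_handler(command, args):   # hypothetical module handler
        return create_answer(0)

    session = ModuleCCSession("example.spec", my_config_handler,
                              my_command_handler, handle_logging_config=True)
    session.start()   # sends the spec and requests the configuration
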
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 5b9dafb..46fb661 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
 python_PYTHON = __init__.py master.py sqlite3_ds.py
 
 pythondir = $(pyexecdir)/isc/datasrc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index a6c5c58..6f6d157 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -6,6 +6,13 @@ EXTRA_DIST += testdata/brokendb.sqlite3
 EXTRA_DIST += testdata/example.com.sqlite3
 CLEANFILES = $(abs_builddir)/example.com.out.sqlite3
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -15,6 +22,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
 	TESTDATA_PATH=$(abs_srcdir)/testdata \
 	TESTDATA_WRITE_PATH=$(abs_builddir) \
diff --git a/src/lib/python/isc/log/Makefile.am b/src/lib/python/isc/log/Makefile.am
index acd2acc..b228caf 100644
--- a/src/lib/python/isc/log/Makefile.am
+++ b/src/lib/python/isc/log/Makefile.am
@@ -1,8 +1,32 @@
 SUBDIRS = . tests
 
-python_PYTHON = __init__.py log.py
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-pythondir = $(pyexecdir)/isc/log
+pythondir = $(pyexecdir)/isc
+python_LTLIBRARIES = log.la
+log_la_SOURCES = log.cc
+
+log_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+log_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+log_la_LDFLAGS = $(PYTHON_LDFLAGS)
+log_la_LDFLAGS += -module
+log_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
+log_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+log_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
+log_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+log_la_LIBADD += $(PYTHON_LIB)
+
+# This is not installed, it helps locate the module during tests
+EXTRA_DIST = __init__.py
 
 pytest:
 	$(SHELL) tests/log_test
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log/__init__.py b/src/lib/python/isc/log/__init__.py
index a34e164..641cf79 100644
--- a/src/lib/python/isc/log/__init__.py
+++ b/src/lib/python/isc/log/__init__.py
@@ -1 +1,33 @@
-from isc.log.log import *
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed. The log.so is installed into the right place.
+# It exists only so the module can be found in the .libs directory when we
+# run as a test or from the build directory.
+# But as nobody gives us the builddir explicitly (and we can't use generation
+# from a .in file, as it would put us into the builddir and we wouldn't be
+# found), we guess from the current directory. Any idea for something better?
+# This should be enough for the tests, but would it work for B10_FROM_SOURCE
+# as well? Should we look there? Or define something in bind10_config?
+
+import os
+import sys
+
+for base in sys.path[:]:
+    loglibdir = os.path.join(base, 'isc/log/.libs')
+    if os.path.exists(loglibdir):
+        sys.path.insert(0, loglibdir)
+
+from log import *
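
The wrapper built here replaces the old pure-Python isc.log.log module with a
C++ extension.  Based on the argument parsing and docstrings in log.cc below, a
rough usage sketch looks like this (the root logger name "example" is
arbitrary, the Python-level class name Logger is assumed from the wrapper code,
and the commented-out call assumes new_config and spec have already been
serialized with json.dumps(), as default_logconfig_handler does):

    import json
    import isc.log

    # init() must be called before any logging is attempted; the arguments are
    # the root logger name and, optionally, severity, debug level and a
    # message file with replacement texts.
    isc.log.init("example", "INFO", 0)

    # Loggers are created by name, mirroring the C++ isc::log::Logger class.
    logger = isc.log.Logger("example")

    # Logging configuration updates are passed as two JSON strings: the new
    # configuration and the full module specification.
    # isc.log.log_config_update(json.dumps(new_config), json.dumps(spec))
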
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
new file mode 100644
index 0000000..484151f
--- /dev/null
+++ b/src/lib/python/isc/log/log.cc
@@ -0,0 +1,701 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <log/message_dictionary.h>
+#include <log/logger_manager.h>
+#include <log/logger.h>
+
+#include <config/ccsession.h>
+
+#include <string>
+#include <boost/bind.hpp>
+
+using namespace isc::log;
+using std::string;
+using boost::bind;
+
+// We encountered a strange problem with Clang (clang version 2.8
+// (tags/RELEASE_28 115909)) on OSX, where unwinding the stack
+// segfaults the moment this exception was thrown and caught.
+//
+// Placing it in a named namespace instead of the original
+// unnamed namespace appears to solve this, so as a temporary
+// workaround, we create a local randomly named namespace here
+// to solve this issue.
+namespace clang_unnamed_namespace_workaround {
+    // To propagate python exceptions through our code
+    // This exception is used to signal to the calling function that a
+    // proper Python Exception has already been set, and the caller
+    // should now return NULL.
+    // Since it is only used internally and should not pass any
+    // information itself, it carries no data of its own.
+    class InternalError : public std::exception {};
+}
+using namespace clang_unnamed_namespace_workaround;
+
+namespace {
+
+// This is for testing only. The real module will have it always set as
+// NULL and will use the global dictionary.
+MessageDictionary* testDictionary = NULL;
+
+PyObject*
+setTestDictionary(PyObject*, PyObject* args) {
+    PyObject* enableO;
+    // The API doesn't seem to provide conversion to bool,
+    // so we do it a little bit manually
+    if (!PyArg_ParseTuple(args, "O", &enableO)) {
+        return (NULL);
+    }
+    int enableI(PyObject_IsTrue(enableO));
+    if (enableI == -1) {
+        return (NULL);
+    }
+    bool enable(enableI != 0);
+
+    try {
+        delete testDictionary;
+        testDictionary = NULL;
+        if (enable) {
+            testDictionary = new MessageDictionary;
+        }
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+    Py_RETURN_NONE;
+}
+
+PyObject*
+createMessage(PyObject*, PyObject* args) {
+    const char* mid;
+    const char* text;
+    // We parse the strings
+    if (!PyArg_ParseTuple(args, "ss", &mid, &text)) {
+        return (NULL);
+    }
+    PyObject* origMid;
+    // And extract the original representation of the message
+    // ID, so we can return it instead of creating another instance.
+    // This call shouldn't fail if the previous one succeeded.
+    if (!PyArg_ParseTuple(args, "Os", &origMid, &text)) {
+        return (NULL);
+    }
+
+    try {
+        MessageDictionary* dict = testDictionary ? testDictionary :
+            &MessageDictionary::globalDictionary();
+
+        // We ignore the result; any duplicates will end up in some kind of
+        // dupe list if there's a problem
+        dict->add(mid, text);
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+
+    // Return the ID
+    Py_INCREF(origMid);
+    return (origMid);
+}
+
+PyObject*
+getMessage(PyObject*, PyObject* args) {
+    const char* mid;
+    if (!PyArg_ParseTuple(args, "s", &mid)) {
+        return (NULL);
+    }
+
+    try {
+        MessageDictionary* dict = testDictionary ? testDictionary :
+            &MessageDictionary::globalDictionary();
+
+        const std::string& result(dict->getText(mid));
+        if (result.empty()) {
+            Py_RETURN_NONE;
+        } else {
+            return (Py_BuildValue("s", result.c_str()));
+        }
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+reset(PyObject*, PyObject*) {
+    LoggerManager::reset();
+    Py_RETURN_NONE;
+}
+
+PyObject*
+init(PyObject*, PyObject* args) {
+    const char* root;
+    const char* file(NULL);
+    const char* severity("INFO");
+    int dbglevel(0);
+    if (!PyArg_ParseTuple(args, "s|siz", &root, &severity, &dbglevel, &file)) {
+        return (NULL);
+    }
+
+    try {
+        LoggerManager::init(root, getSeverity(severity), dbglevel, file);
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+    Py_RETURN_NONE;
+}
+
+PyObject*
+logConfigUpdate(PyObject*, PyObject* args) {
+    // we have no wrappers for ElementPtr and ConfigData,
+    // So we expect JSON strings and convert them.
+    // The new_config object is assumed to have been validated.
+
+    const char* new_config_json;
+    const char* mod_spec_json;
+    if (!PyArg_ParseTuple(args, "ss",
+                          &new_config_json, &mod_spec_json)) {
+        return (NULL);
+    }
+
+    try {
+        isc::data::ConstElementPtr new_config =
+            isc::data::Element::fromJSON(new_config_json);
+        isc::data::ConstElementPtr mod_spec_e =
+            isc::data::Element::fromJSON(mod_spec_json);
+        isc::config::ModuleSpec mod_spec(mod_spec_e);
+        isc::config::ConfigData config_data(mod_spec);
+        isc::config::default_logconfig_handler("logging", new_config,
+                                               config_data);
+
+        Py_RETURN_NONE;
+    } catch (const isc::data::JSONError& je) {
+        std::string error_msg = std::string("JSON format error: ") + je.what();
+        PyErr_SetString(PyExc_TypeError, error_msg.c_str());
+    } catch (const isc::data::TypeError& de) {
+        PyErr_SetString(PyExc_TypeError, "argument 1 of log_config_update "
+                                         "is not a map of config data");
+    } catch (const isc::config::ModuleSpecError& mse) {
+        PyErr_SetString(PyExc_TypeError, "argument 2 of log_config_update "
+                                         "is not a correct module specification");
+    } catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+    } catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+    }
+    return (NULL);
+}
+
+PyMethodDef methods[] = {
+    {"set_test_dictionary", setTestDictionary, METH_VARARGS,
+        "Set or unset testing mode for message dictionary. In testing, "
+        "the create_message and get_message functions work on a different "
+        "dictionary than the logger-global one, not polluting it."},
+    {"create_message", createMessage, METH_VARARGS,
+        "Creates a new message in the dictionary. You shouldn't need to "
+        "call this directly, it should be called by the generated message "
+        "file. Returns the identifier to be used in logging. The text "
+        "shouldn't be empty."},
+    {"get_message", getMessage, METH_VARARGS,
+        "Get a message. This function is for testing purposes and you don't "
+        "need to call it. It returns None if the message does not exist."},
+    {"reset", reset, METH_NOARGS,
+        "Reset all logging. For testing purposes only, do not use."},
+    {"init", init, METH_VARARGS,
+        "Run-time initialization. You need to call this before you do any "
+        "logging, to configure the root logger name. You may also provide "
+        "logging severity (one of 'DEBUG', 'INFO', 'WARN', 'ERROR' or "
+        "'FATAL'), a debug level (integer in the range 0-99) and a file name "
+        "of a dictionary with message text translations."},
+    {"log_config_update", logConfigUpdate, METH_VARARGS,
+        "Update logger settings. This method is automatically used when "
+        "ModuleCCSession is initialized with handle_logging_config set "
+        "to True. When called, the first argument is the new logging "
+        "configuration (in JSON format). The second argument is "
+        "the raw specification (as returned from "
+        "ConfigData.get_module_spec().get_full_spec(), and converted to "
+        "JSON format).\n"
+        "Raises a TypeError if either argument is not a (correct) JSON "
+        "string, or if the spec is not a correct spec.\n"
+        "If this call succeeds, the global logger settings have "
+        "been updated."
+    },
+    {NULL, NULL, 0, NULL}
+};
+
+class LoggerWrapper : public PyObject {
+// Everything is public here, as it is accessible only inside this .cc file.
+public:
+    Logger *logger_;
+};
+
+extern PyTypeObject logger_type;
+
+int
+Logger_init(LoggerWrapper* self, PyObject* args) {
+    const char* name;
+    if (!PyArg_ParseTuple(args, "s", &name)) {
+        return (-1);
+    }
+    try {
+        self->logger_ = new Logger(name);
+        return (0);
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (-1);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (-1);
+    }
+}
+
+void
+Logger_destroy(LoggerWrapper* const self) {
+    delete self->logger_;
+    self->logger_ = NULL;
+    Py_TYPE(self)->tp_free(self);
+}
+
+// The isc::log library doesn't contain a function for this direction of conversion
+const char*
+severityToText(const Severity& severity) {
+    switch (severity) {
+        case DEFAULT:
+            return ("DEFAULT");
+        case DEBUG:
+            return ("DEBUG");
+        case INFO:
+            return ("INFO");
+        case WARN:
+            return ("WARN");
+        case ERROR:
+            return ("ERROR");
+        case FATAL:
+            return ("FATAL");
+        default:
+            return (NULL);
+    }
+}
+
+PyObject*
+Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+    try {
+        return (Py_BuildValue("s",
+                              severityToText(
+                                  self->logger_->getEffectiveSeverity())));
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+    try {
+        return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+    const char* severity;
+    int dbgLevel = 0;
+    if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
+        return (NULL);
+    }
+    try {
+        self->logger_->setSeverity((severity == NULL) ? DEFAULT :
+                                   getSeverity(severity), dbgLevel);
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+    Py_RETURN_NONE;
+}
+
+template<class FPtr> // Who should remember the pointer-to-method syntax
+PyObject*
+Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
+    try {
+        if ((self->logger_->*function)()) {
+            Py_RETURN_TRUE;
+        } else {
+            Py_RETURN_FALSE;
+        }
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+    return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
+}
+
+PyObject*
+Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+    return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
+}
+
+PyObject*
+Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+    return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
+}
+
+PyObject*
+Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+    return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
+}
+
+PyObject*
+Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+    int level = MIN_DEBUG_LEVEL;
+    if (!PyArg_ParseTuple(args, "|i", &level)) {
+        return (NULL);
+    }
+
+    try {
+        if (self->logger_->isDebugEnabled(level)) {
+            Py_RETURN_TRUE;
+        } else {
+            Py_RETURN_FALSE;
+        }
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+}
+
+string
+objectToStr(PyObject* object, bool convert) {
+    PyObject* cleanup(NULL);
+    if (convert) {
+        object = cleanup = PyObject_Str(object);
+        if (object == NULL) {
+            throw InternalError();
+        }
+    }
+    const char* value;
+    PyObject* tuple(Py_BuildValue("(O)", object));
+    if (tuple == NULL) {
+        if (cleanup != NULL) {
+            Py_DECREF(cleanup);
+        }
+        throw InternalError();
+    }
+
+    if (!PyArg_ParseTuple(tuple, "s", &value)) {
+        Py_DECREF(tuple);
+        if (cleanup != NULL) {
+            Py_DECREF(cleanup);
+        }
+        throw InternalError();
+    }
+    string result(value);
+    Py_DECREF(tuple);
+    if (cleanup != NULL) {
+        Py_DECREF(cleanup);
+    }
+    return (result);
+}
+
+// Generic function to output the logging message. Called by the real functions.
+template<class Function>
+PyObject*
+Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
+    try {
+        Py_ssize_t number(PyObject_Length(args));
+        if (number < 0) {
+            return (NULL);
+        }
+
+        // Which argument is the first to format?
+        size_t start(1);
+        if (dbgLevel) {
+            start ++;
+        }
+
+        if (number < start) {
+            return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
+                                 "logging call, at least %zu needed and %zd "
+                                 "given", start, number));
+        }
+
+        // Extract the fixed arguments
+        PyObject *midO(PySequence_GetItem(args, start - 1));
+        if (midO == NULL) {
+            return (NULL);
+        }
+        string mid(objectToStr(midO, false));
+        long dbg(0);
+        if (dbgLevel) {
+            PyObject *dbgO(PySequence_GetItem(args, 0));
+            if (dbgO == NULL) {
+                return (NULL);
+            }
+            dbg = PyLong_AsLong(dbgO);
+            if (PyErr_Occurred()) {
+                return (NULL);
+            }
+        }
+
+        // We create the logging message right now. If we fail to convert a
+        // later parameter to a string, at least the part we have already
+        // built will be output.
+        Logger::Formatter formatter(function(dbg, mid.c_str()));
+
+        // Now process the rest of the parameters, convert each to a string
+        // and put it into the formatter. The formatter prints itself when it
+        // goes out of scope.
+        for (size_t i(start); i < number; ++ i) {
+            PyObject* param(PySequence_GetItem(args, i));
+            if (param == NULL) {
+                return (NULL);
+            }
+            formatter = formatter.arg(objectToStr(param, true));
+        }
+        Py_RETURN_NONE;
+    }
+    catch (const InternalError&) {
+        return (NULL);
+    }
+    catch (const std::exception& e) {
+        PyErr_SetString(PyExc_RuntimeError, e.what());
+        return (NULL);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+        return (NULL);
+    }
+}
+
+// Now map the functions into the performOutput. I wish C++ could do
+// functional programming.
+PyObject*
+Logger_debug(LoggerWrapper* self, PyObject* args) {
+    return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
+                                 args, true));
+}
+
+PyObject*
+Logger_info(LoggerWrapper* self, PyObject* args) {
+    return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
+                                 args, false));
+}
+
+PyObject*
+Logger_warn(LoggerWrapper* self, PyObject* args) {
+    return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
+                                 args, false));
+}
+
+PyObject*
+Logger_error(LoggerWrapper* self, PyObject* args) {
+    return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
+                                 args, false));
+}
+
+PyObject*
+Logger_fatal(LoggerWrapper* self, PyObject* args) {
+    return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
+                                 args, false));
+}
+
+PyMethodDef loggerMethods[] = {
+    { "get_effective_severity",
+        reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
+        METH_NOARGS, "Returns the effective logging severity as string" },
+    { "get_effective_debug_level",
+        reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
+        METH_NOARGS, "Returns the current debug level." },
+    { "set_severity",
+        reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+        "Sets the severity of a logger. The parameters are severity as a "
+        "string and, optionally, a debug level (integer in range 0-99). "
+        "The severity may be NULL, in which case an inherited value is taken."
+    },
+    { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
+        METH_VARARGS, "Returns if the logger would log debug message now. "
+            "You can provide a desired debug level." },
+    { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
+        METH_NOARGS, "Returns if the logger would log info message now." },
+    { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
+        METH_NOARGS, "Returns if the logger would log warn message now." },
+    { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
+        METH_NOARGS, "Returns if the logger would log error message now." },
+    { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
+        METH_NOARGS, "Returns if the logger would log fatal message now." },
+    { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+        "Logs a debug-severity message. It takes the debug level, message ID "
+        "and any number of stringifiable arguments to the message." },
+    { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+        "Logs a info-severity message. It taskes the message ID and any "
+        "number of stringifiable arguments to the message." },
+    { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+        "Logs a warn-severity message. It taskes the message ID and any "
+        "number of stringifiable arguments to the message." },
+    { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+        "Logs a error-severity message. It taskes the message ID and any "
+        "number of stringifiable arguments to the message." },
+    { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+        "Logs a fatal-severity message. It taskes the message ID and any "
+        "number of stringifiable arguments to the message." },
+    { NULL, NULL, 0, NULL }
+};
+
+PyTypeObject logger_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "isc.log.Logger",
+    sizeof(LoggerWrapper),                 // tp_basicsize
+    0,                                  // tp_itemsize
+    reinterpret_cast<destructor>(Logger_destroy),       // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash 
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "Wrapper around the C++ isc::log::Logger class."
+    "It is not complete, but everything important should be here.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    loggerMethods,                      // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    reinterpret_cast<initproc>(Logger_init),            // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyModuleDef iscLog = {
+    { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+    "log",
+    "Python bindings for the classes in the isc::log namespace.\n\n"
+    "These bindings are close match to the C++ API, but they are not complete "
+    "(some parts are not needed) and some are done in more python-like ways.",
+    -1,
+    methods,
+    NULL,
+    NULL,
+    NULL,
+    NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_log(void) {
+    PyObject* mod = PyModule_Create(&iscLog);
+    if (mod == NULL) {
+        return (NULL);
+    }
+
+    if (PyType_Ready(&logger_type) < 0) {
+        return (NULL);
+    }
+
+    if (PyModule_AddObject(mod, "Logger",
+                           static_cast<PyObject*>(static_cast<void*>(
+                               &logger_type))) < 0) {
+        return (NULL);
+    }
+    Py_INCREF(&logger_type);
+
+    return (mod);
+}
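
For reference, the Python-facing API defined above can be exercised roughly
as follows. This is an illustrative sketch only; the logger name, message ID
and message text are hypothetical and not part of this change.

    import isc.log

    # Run-time initialization: root logger name, severity, debug level and
    # an optional message-translation file (None here).
    isc.log.init("b10-example", "DEBUG", 30, None)

    # Register a message and log through a named logger.
    MSG_ID = isc.log.create_message("EXAMPLE_STARTED", "example started on %1")
    logger = isc.log.Logger("example")
    if logger.is_debug_enabled(30):
        logger.debug(30, MSG_ID, "port 5300")

    # log_config_update() takes two JSON strings: the new logging
    # configuration and the module specification (normally the content of
    # logging.spec); ModuleCCSession calls it automatically when
    # handle_logging_config is True.
    # isc.log.log_config_update('{}', logging_spec_json)
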
diff --git a/src/lib/python/isc/log/log.py b/src/lib/python/isc/log/log.py
deleted file mode 100644
index 74261e2..0000000
--- a/src/lib/python/isc/log/log.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""This module is to convert python logging module over
-to log4python.
-Copyright (C) 2010  Internet Systems Consortium.
-To use, simply 'import isc.log.log' and log away!
-"""
-import os
-import sys
-import syslog
-import logging
-import logging.handlers
-
-"""LEVELS: logging levels mapping
-"""
-LEVELS = {'debug' : logging.DEBUG,
-       	  'info' : logging.INFO,
-          'warning' : logging.WARNING,
-          'error' : logging.ERROR,
-          'critical' : logging.CRITICAL}
-
-FORMATTER = logging.Formatter("%(name)s: %(levelname)s: %(message)s")
-TIME_FORMATTER = logging.Formatter("%(asctime)s.%(msecs)03d %(name)s: %(levelname)s: %(message)s",
-                                   "%d-%b-%Y %H:%M:%S")
-
-def log_err(err_type, err_msg):
-    sys.stderr.write(err_type + ": " + "%s.\n" % str(err_msg)[str(err_msg).find(']')+1:])
-
-
-class NSFileLogHandler(logging.handlers.RotatingFileHandler):
-    """RotatingFileHandler: replace RotatingFileHandler with a custom handler"""
-
-    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
-        abs_file_name = self._get_abs_file_path(filename)
-        """Create log directory beforehand, because the underlying logging framework won't 
-        create non-exsiting log directory on writing logs.
-        """
-        if not (os.path.exists(os.path.dirname(abs_file_name))):
-            os.makedirs(os.path.dirname(abs_file_name))
-        super(NSFileLogHandler, self).__init__(abs_file_name, mode, maxBytes,
-                                                backupCount, encoding, delay)
-
-    def handleError(self, record):
-        """Overwrite handleError to provide more user-friendly error messages"""
-        if logging.raiseExceptions:
-            ei = sys.exc_info()
-            if (ei[1]):
-                sys.stderr.write("[b10-logging] : " + str(ei[1]))
-
-    def _get_abs_file_path(self, file_name):
-        """ Get absolute file path"""
-        # For a bare filename, log_dir will be set the current directory.
-        if not os.path.dirname(file_name):
-            abs_file_dir = os.getcwd()
-        else:
-            abs_file_dir = os.path.abspath(os.path.dirname(file_name))
-        abs_file_name = os.path.join(abs_file_dir, os.path.basename(file_name))
-        return abs_file_name
-
-    def shouldRollover(self, record):
-        """Rewrite RotatingFileHandler.shouldRollover. 
-       
-        If the log file is deleted at runtime, a new file will be created.
-        """
-        dfn = self.baseFilename
-        if (self.stream) and (not os.path.exists(dfn)): #Does log file exist?
-            self.stream = None
-            """ Log directory may be deleted while bind10 running or updated with a
-             non-existing directory. Need to create log directory beforehand, because
-             the underlying logging framework won't create non-exsiting log directory
-             on writing logs.
-             """
-            if not (os.path.exists(os.path.dirname(dfn))): #Does log subdirectory exist?
-                os.makedirs(os.path.dirname(dfn))
-            self.stream = self._open()
-        return super(NSFileLogHandler, self).shouldRollover(record)
-    
-    def update_config(self, file_name, backup_count, max_bytes):
-        """Update RotatingFileHandler configuration.
-        Changes will be picked up in the next call to shouldRollover().
-
-        input:
-            log file name
-            max backup count
-            predetermined log file size
-        """
-        abs_file_name = self._get_abs_file_path(file_name)
-        self.baseFilename = abs_file_name 
-        self.maxBytes = max_bytes
-        self.backupCount = backup_count
-
-
-class NSSysLogHandler(logging.Handler):    
-    """Replace SysLogHandler with a custom handler 
-
-    A handler class which sends formatted logging records to a syslog
-    server.
-    """
-    def __init__(self, ident, logopt=0, facility=syslog.LOG_USER):        
-        """Initialize a handler.
-    
-        If facility is not specified, LOG_USER is used.
-        """
-        super(NSSysLogHandler, self).__init__()
-        self._ident = ident        
-        self._logopt = logopt        
-        self._facility = facility        
-        self._mappings = {            
-                logging.DEBUG: syslog.LOG_DEBUG,            
-                logging.INFO: syslog.LOG_INFO,            
-                logging.WARNING: syslog.LOG_WARNING,            
-                logging.ERROR: syslog.LOG_ERR,            
-                logging.CRITICAL: syslog.LOG_CRIT,            
-                }   
-        
-    def _encodeLevel(self, level):        
-        """Encoding the priority."""
-        return self._mappings.get(level, syslog.LOG_INFO)    
-   
-    def emit(self, record):   
-        """Emit a record.
-     
-        The record is formatted, and then sent to the syslog server. If
-        exception information is present, it is NOT sent to the server.
-        """
-        syslog.openlog(self._ident, self._logopt, self._facility)        
-        msg = self.format(record)        
-        prio = self._encodeLevel(record.levelno)        
-        syslog.syslog(prio, msg)        
-        syslog.closelog()
-
-
-class NSLogger(logging.getLoggerClass()):
-    """Override logging.logger behaviour."""
-    def __init__(self, log_name, log_file, severity='debug', versions=0,
-                 max_bytes=0, log_to_console=True):
-        """Initializes the logger with some specific parameters
-
-        If log_to_console is True, stream handler will be used;
-        else syslog handler will be used.
-
-        To disable file handler, set log_file = ''.
-        """
-        self._log_name = log_name
-        self._log_file = log_file
-        self._severity = severity
-        self._versions = versions
-        self._max_bytes = max_bytes
-
-        super(NSLogger, self).__init__(self._log_name)
-
-        # Set up a specific logger with our desired output level
-        logLevel = LEVELS.get(self._severity, logging.NOTSET)
-        self.setLevel(logLevel)
-
-        self._file_handler = None
-        self._stream_handler = None
-        self._syslog_handler = None
-
-        self._add_rotate_handler(self._log_file, self._versions, self._max_bytes)
-        if log_to_console:
-            self._add_stream_handler()
-        else:
-            self._add_syslog_handler()
-
-    def _add_rotate_handler(self, log_file, backup_count, max_bytes):
-        """Add a rotate file handler.
-   
-        input:
-            log_file : the location of log file. Handler will not be created 
-                       if log_file=''
-            max_bytes : limit log growth
-            backup_count : max backup count
-        """
-        if (log_file != 0  and log_file != ''):
-            try:
-                self._file_handler = NSFileLogHandler(filename = log_file,
-                                          maxBytes = max_bytes, backupCount = backup_count)
-            except (IOError, OSError) as e:
-                self._file_handler = None
-                log_err("[b10-logging] Add file handler fail", str(e))
-                return
-            self._file_handler.setFormatter(TIME_FORMATTER)
-            self.addHandler(self._file_handler)
-
-    def _add_stream_handler(self):
-        """Add a stream handler.
-    
-        sys.stderr will be used for logging output.
-        """
-        self._stream_handler = logging.StreamHandler()
-        self._stream_handler.setFormatter(TIME_FORMATTER)
-        self.addHandler(self._stream_handler)
-
-    def _add_syslog_handler(self, nsfacility=syslog.LOG_USER):
-        """Add a syslog handler.
-   
-        If facility is not specified, LOG_USER is used.
-        The default severity level is INFO.
-        """
-        self._syslog_handler = NSSysLogHandler('BIND10', facility = nsfacility)
-        self._syslog_handler.setFormatter(FORMATTER)
-        #set syslog handler severity level INFO
-        self._syslog_handler.setLevel(logging.INFO)
-        self.addHandler(self._syslog_handler)
-
-    def _update_rotate_handler(self, log_file, backup_count, max_bytes):
-        """If the rotate file handler has been added to the logger, update its 
-        configuration, or add it to the logger.
-        """
-        if (self._file_handler in self.handlers):
-            if (log_file != 0 and log_file != ''):
-                self._file_handler.update_config(log_file, backup_count, max_bytes)
-            else:
-                """If log file is empty, the handler will be removed."""
-                self._file_handler.flush()
-                self._file_handler.close()
-                self.removeHandler(self._file_handler)
-        else:
-            self._add_rotate_handler(log_file, backup_count, max_bytes)
-
-    def _get_config(self, config_data):
-         """Get config data from module configuration"""
-         
-         log_file_str = config_data.get('log_file')
-         if (log_file_str):
-            self._log_file = log_file_str
-         
-         severity_str = config_data.get('log_severity')
-         if (severity_str):
-             self._severity = severity_str
-
-         versions_str = config_data.get('log_versions')
-         if (versions_str):
-             self._versions = int(versions_str)
-
-         max_bytes_str = config_data.get('log_max_bytes')
-         if (max_bytes_str):
-             self._max_bytes = int(max_bytes_str)
-
-    def update_config(self, config_data):
-        """Update logger's configuration.
-
-        We can update logger's log level and its rotate file handler's configuration.
-        """
-        self._get_config(config_data)
-
-        logLevel = LEVELS.get(self._severity, logging.NOTSET)
-        if (logLevel != self.getEffectiveLevel()):
-            self.setLevel(logLevel)
-        self._update_rotate_handler(self._log_file, self._versions, self._max_bytes)
-
-    def log_message(self, level, msg, *args, **kwargs):
-        """Log 'msg % args' with the integer severity 'level'.
-     
-        To pass exception information, use the keyword argument exc_info with
-        a true value, e.g.
-  
-        logger.log_message('info', "We have a %s", "mysterious problem").
-        """
-        logLevel = LEVELS.get(level, logging.NOTSET)
-        try:
-            self.log(logLevel, msg, *args, **kwargs)
-        except (TypeError, KeyError) as e:
-            sys.stderr.write("[b10-logging] Log message fail %s\n" % (str(e)))
-
-
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 86b3e5d..6bb67de 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -1,16 +1,28 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = log_test.py
-EXTRA_DIST = $(PYTESTS)
+EXTRA_DIST = $(PYTESTS) log_console.py.in console.out check_output.sh
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
+	$(LIBRARY_PATH_PLACEHOLDER) \
+	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+	$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
 if ENABLE_PYTHON_COVERAGE
-	touch $(abs_top_srcdir)/.coverage 
+	touch $(abs_top_srcdir)/.coverage
 	rm -f .coverage
 	${LN_S} $(abs_top_srcdir)/.coverage .coverage
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+	$(LIBRARY_PATH_PLACEHOLDER) \
+	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/log/tests/check_output.sh b/src/lib/python/isc/log/tests/check_output.sh
new file mode 100755
index 0000000..32146af
--- /dev/null
+++ b/src/lib/python/isc/log/tests/check_output.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+"$1" 2>&1 | cut -d\  -f3- | diff - "$2" 1>&2
diff --git a/src/lib/python/isc/log/tests/console.out b/src/lib/python/isc/log/tests/console.out
new file mode 100644
index 0000000..fbb1bb9
--- /dev/null
+++ b/src/lib/python/isc/log/tests/console.out
@@ -0,0 +1,4 @@
+INFO  [test.output] MSG_ID Message with list [1, 2, 3, 4]
+WARN  [test.output] DIFFERENT Different message
+FATAL [test.output] MSG_ID Message with 2 1
+DEBUG [test.output] MSG_ID Message with 3 2
diff --git a/src/lib/python/isc/log/tests/log_console.py.in b/src/lib/python/isc/log/tests/log_console.py.in
new file mode 100755
index 0000000..af05f61
--- /dev/null
+++ b/src/lib/python/isc/log/tests/log_console.py.in
@@ -0,0 +1,15 @@
+#!@PYTHON@
+
+import isc.log
+# This would come from a dictionary in real life
+MSG_ID = isc.log.create_message("MSG_ID", "Message with %2 %1")
+DIFFERENT = isc.log.create_message("DIFFERENT", "Different message")
+isc.log.init("test")
+logger = isc.log.Logger("output")
+
+logger.debug(20, MSG_ID, "test", "no output")
+logger.info(MSG_ID, [1, 2, 3, 4], "list")
+logger.warn(DIFFERENT)
+logger.fatal(MSG_ID, 1, 2)
+logger.set_severity("DEBUG", 99)
+logger.debug(1, MSG_ID, 2, 3)
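
The message texts above use positional placeholders: %1 is replaced by the
first extra argument, %2 by the second, and so on, each converted to a
string. That is how the calls in log_console.py.in produce the lines recorded
in console.out (fatal(MSG_ID, 1, 2) against "Message with %2 %1" yields
"Message with 2 1"). A rough sketch of that substitution, written as a
hypothetical stand-alone helper rather than the actual formatter:

    def render(template, *args):
        # Substitute %N with the stringified N-th argument; walk from the
        # highest index down so that, e.g., %10 is not clobbered by %1.
        out = template
        for i in range(len(args), 0, -1):
            out = out.replace("%" + str(i), str(args[i - 1]))
        return out

    assert render("Message with %2 %1", 1, 2) == "Message with 2 1"
    assert render("Message with %2 %1", [1, 2, 3, 4], "list") == \
           "Message with list [1, 2, 3, 4]"
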
diff --git a/src/lib/python/isc/log/tests/log_test.in b/src/lib/python/isc/log/tests/log_test.in
deleted file mode 100644
index 60e5e3f..0000000
--- a/src/lib/python/isc/log/tests/log_test.in
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
-export PYTHON_EXEC
-
-TEST_PATH=@abs_top_srcdir@/src/lib/python/isc/log/tests
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
-export PYTHONPATH
-
-cd ${TEST_PATH}
-exec ${PYTHON_EXEC} -O log_test.py $*
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 026dee1..4292b6c 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2010  Internet Systems Consortium.
+# Copyright (C) 2011  Internet Systems Consortium.
 #
 # Permission to use, copy, modify, and distribute this software for any
 # purpose with or without fee is hereby granted, provided that the above
@@ -13,225 +13,151 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-#
-# Tests for the python logging module
-#
-
-from isc.log.log import *
+# This tests that the module can be loaded; nothing more yet.
+import isc.log
 import unittest
-import os
-import sys
-import tempfile
-
-
-class TestRotateFileHandler(unittest.TestCase):
+import json
+import bind10_config
+from isc.config.ccsession import path_search
 
+class LogDict(unittest.TestCase):
     def setUp(self):
-        self.FILE_LOG1 = tempfile.NamedTemporaryFile(mode='w',
-                                                 prefix="b10",
-                                                 delete=True)
-        self.FILE_LOG2 = tempfile.NamedTemporaryFile(mode='w',
-                                                 prefix="b10",
-                                                 delete=True)
-        self.FILE_LOG3 = tempfile.NamedTemporaryFile(mode='w',
-                                                 prefix="b10",
-                                                 delete=True)
-        self.handler = NSFileLogHandler(filename = self.FILE_LOG1.name, 
-                                        maxBytes = 1024, 
-                                        backupCount = 5)
-
-    def test_shouldRollover(self):
-        if(os.path.exists(self.FILE_LOG1.name)):
-            os.remove(self.FILE_LOG1.name)
-        record = logging.LogRecord(None, None, "", 0, "rotate file handler", (), None, None)
-        self.handler.shouldRollover(record)
-        self.assertTrue(os.path.exists(self.FILE_LOG1.name))
-
-    def test_get_absolute_file_path(self):
-        abs_file_name = self.handler._get_abs_file_path(self.FILE_LOG1.name)
-        self.assertEqual(abs_file_name, self.FILE_LOG1.name)
-        # test bare filename
-        file_name1 = "bind10.py"
-        abs_file_name = self.handler._get_abs_file_path(file_name1)
-        self.assertEqual(abs_file_name, os.path.join(os.getcwd(), file_name1))
-        # test relative path 
-        file_name2 = "./bind10.py"
-        abs_file_name = self.handler._get_abs_file_path(file_name2)
-        self.assertEqual(abs_file_name, os.path.join(os.getcwd(), os.path.basename(file_name2)))
-         
-    def test_update_config(self):
-        self.handler.update_config(self.FILE_LOG2.name, 3, 512)
-        self.assertEqual(self.handler.baseFilename, self.FILE_LOG2.name)
-        self.assertEqual(self.handler.maxBytes, 512)
-        self.assertEqual(self.handler.backupCount, 3)
-
-        # check the existence of new log file.
-        # emit() will call shouldRollover() to update the log file
-        if(os.path.exists(self.FILE_LOG2.name)):
-            os.remove(self.FILE_LOG2.name)
-        record = logging.LogRecord(None, None, "", 0, "rotate file handler", (), None, None)
-        self.handler.emit(record)
-        self.assertTrue(os.path.exists(self.FILE_LOG2.name))
-
-    def test_handle_Error(self):
-        if(os.path.exists(self.FILE_LOG3.name)):
-            os.remove(self.FILE_LOG3.name)
-        # redirect error message to file
-        savederr = sys.stderr
-        errfd = open(self.FILE_LOG3.name, 'w+')
-        sys.stderr = errfd
-        record = logging.LogRecord(None, None, "", 0, "record message", (), None, None)
-        try:
-            raise ValueError("ValueError")
-        except ValueError:
-            self.handler.handleError(record)
-
-        self.assertEqual("[b10-logging] : ValueError", errfd.read())
-        sys.stderr = savederr
-        errfd.close()
-
+        # We work on a test dictionary now.
+        isc.log.set_test_dictionary(True)
     def tearDown(self):
-        self.handler.flush()
-        self.handler.close()
-        self.FILE_LOG1.close()
-        self.FILE_LOG2.close()
-        self.FILE_LOG3.close()
+        # Return to the global dictionary
+        isc.log.set_test_dictionary(False)
 
-class TestSysLogHandler(unittest.TestCase):
-    def setUp(self):
-        self.handler = NSSysLogHandler("BIND10")
-
-    def test_encodeLevel(self):
-        sysLevel = self.handler._encodeLevel(logging.ERROR)
-        self.assertEqual(sysLevel, syslog.LOG_ERR)
+    def test_load_msgs(self):
+        # Try loading a message and see it's there, but nothing more
+        self.assertEqual(isc.log.create_message("ID", "Text"), "ID")
+        self.assertEqual(isc.log.get_message("ID"), "Text")
+        self.assertEqual(isc.log.get_message("no-ID"), None)
 
-    def test_emit(self):
-        syslog_message = "bind10 syslog testing"
-        record = logging.LogRecord(None, None, "", 0, syslog_message, (), None, None)
-        self.handler.emit(record)
+class Manager(unittest.TestCase):
+    def tearDown(self):
+        isc.log.reset()
+
+    def test_init_debug(self):
+        # For now we only check that the call succeeds, as we have no other
+        # functions to verify the outcome with. Once we add the logger class,
+        # we may check more.
+        isc.log.init("root", "DEBUG", 50, None)
+
+    def test_init_defaults(self):
+        # For now we only check that the call succeeds, as we have no other
+        # functions to verify the outcome with. Once we add the logger class,
+        # we may check more.
+        isc.log.init("root")
+
+    def test_init_notfound(self):
+        # This should not throw, because the C++ version doesn't. Should we
+        # really ignore errors like a missing file?
+        isc.log.init("root", "INFO", 0, "/no/such/file")
+
+    def test_log_config_update(self):
+        log_spec = json.dumps(isc.config.module_spec_from_file(path_search('logging.spec', bind10_config.PLUGIN_PATHS)).get_full_spec())
+
+        self.assertRaises(TypeError, isc.log.log_config_update)
+        self.assertRaises(TypeError, isc.log.log_config_update, 1)
+        self.assertRaises(TypeError, isc.log.log_config_update, 1, 1)
+        self.assertRaises(TypeError, isc.log.log_config_update, 1, 1, 1)
+
+        self.assertRaises(TypeError, isc.log.log_config_update, 1, log_spec)
+        self.assertRaises(TypeError, isc.log.log_config_update, [], log_spec)
+        self.assertRaises(TypeError, isc.log.log_config_update, "foo", log_spec)
+        self.assertRaises(TypeError, isc.log.log_config_update, "{ '", log_spec)
+
+        # empty should pass
+        isc.log.log_config_update("{}", log_spec)
+
+        # bad spec
+        self.assertRaises(TypeError, isc.log.log_config_update, "{}", json.dumps({"foo": "bar"}))
+
+        # Try a correct one
+        log_conf = json.dumps({"loggers":
+                                [{"name": "b10-xfrout", "output_options":
+                                    [{"output": "/tmp/bind10.log",
+                                       "destination": "file",
+                                       "flush": True}]}]})
+        isc.log.log_config_update(log_conf, log_spec)
+
+class Logger(unittest.TestCase):
+    def tearDown(self):
+        isc.log.reset()
 
-class TestLogging(unittest.TestCase):
-    
     def setUp(self):
-        self.FILE_STREAM_LOG1 = tempfile.NamedTemporaryFile(mode='w',
-                                                      prefix="b10",
-                                                      delete=True)
-        self.FILE_STREAM_LOG2 = tempfile.NamedTemporaryFile(mode='w',
-                                                      prefix="b10",
-                                                      delete=True)
-        self.FILE_STREAM_LOG3 = tempfile.NamedTemporaryFile(mode='w',
-                                                      prefix="b10",
-                                                      delete=True)
-        self.file_stream_logger = NSLogger('File_Stream_Logger',
-                                           self.FILE_STREAM_LOG1.name,
-                                           'debug', 5, 1024, True)
-        self.syslog_logger = NSLogger('SysLogger', '', 'info', 5, 1024, False)
-        self.stderr_bak = sys.stderr
-        sys.stderr = open(os.devnull, 'w')
-    
-    def test_logging_init(self):
-        self.assertNotEqual(self.file_stream_logger._file_handler, None)
-        self.assertNotEqual(self.file_stream_logger._stream_handler, None)
-        self.assertEqual(self.file_stream_logger._syslog_handler, None)
-
-        self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-        self.assertIn(self.file_stream_logger._stream_handler, self.file_stream_logger.handlers)
-        self.assertNotIn(self.file_stream_logger._syslog_handler, self.file_stream_logger.handlers)
-        logLevel = LEVELS.get('debug', logging.NOTSET)
-        self.assertEqual(self.file_stream_logger.getEffectiveLevel(), logLevel)
-
-        self.assertEqual(self.syslog_logger._file_handler, None)
-        self.assertEqual(self.syslog_logger._stream_handler, None)
-        self.assertNotEqual(self.syslog_logger._syslog_handler, None)
-        self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-        self.assertNotIn(self.syslog_logger._stream_handler, self.syslog_logger.handlers)
-        self.assertIn(self.syslog_logger._syslog_handler, self.syslog_logger.handlers)
-
-        logLevel = LEVELS.get('info', logging.NOTSET)
-        self.assertEqual(self.syslog_logger.getEffectiveLevel(), logLevel)
-
-    def test_add_rotate_handler(self):
-        if(self.syslog_logger._file_handler in self.syslog_logger.handlers):
-            self.syslog_logger.removeHandler(self.syslog_logger._file_handler)
-        
-        self.syslog_logger._add_rotate_handler('', 5, 1024)
-        self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-
-        self.syslog_logger._add_rotate_handler(self.FILE_STREAM_LOG1.name, 5, 1024)
-        self.assertIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-
-        # test IOError exception
-        self.syslog_logger.removeHandler(self.syslog_logger._file_handler)
-        log_file = self.FILE_STREAM_LOG1.name + '/logfile'
-        self.syslog_logger._add_rotate_handler(log_file, 5, 1024)
-        self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-
-    def test_add_stream_handler(self):
-        if(self.file_stream_logger._stream_handler in self.file_stream_logger.handlers):
-            self.file_stream_logger.removeHandler(self.file_stream_logger._stream_handler)
-
-        self.file_stream_logger._add_stream_handler()
-        self.assertIn(self.file_stream_logger._stream_handler, self.file_stream_logger.handlers)
-
-    def test_add_syslog_handler(self):
-        if(self.syslog_logger._syslog_handler in self.syslog_logger.handlers):
-            self.syslog_logger.removeHandler(self.syslog_logger._syslog_handler)
-
-        self.syslog_logger._add_syslog_handler()
-        self.assertIn(self.syslog_logger._syslog_handler, self.syslog_logger.handlers)
-
-    def test_update_rotate_handler(self):
-        self.file_stream_logger._update_rotate_handler(self.FILE_STREAM_LOG2.name, 4, 1024)
-        self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-
-        self.file_stream_logger._update_rotate_handler('', 5, 1024)
-        self.assertNotIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-
-        self.file_stream_logger._update_rotate_handler(self.FILE_STREAM_LOG1.name, 4, 1024)
-        self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-
-    def test_get_config(self):
-        config_data = {'log_file' : self.FILE_STREAM_LOG1.name,
-                       'log_severity' : 'critical',
-                       'log_versions' : 4,
-                       'log_max_bytes' : 1024}
-        self.file_stream_logger._get_config(config_data)
-        self.assertEqual(self.file_stream_logger._log_file, self.FILE_STREAM_LOG1.name)
-        self.assertEqual(self.file_stream_logger._severity, 'critical')
-        self.assertEqual(self.file_stream_logger._versions, 4)
-        self.assertEqual(self.file_stream_logger._max_bytes, 1024)
-
-
-    def test_update_config(self):
-        update_config = {'log_file' : self.FILE_STREAM_LOG1.name,
-                         'log_severity' : 'error',
-                         'log_versions' : 4,
-                         'log_max_bytes' : 1024}
-        self.file_stream_logger.update_config(update_config)
-        logLevel = LEVELS.get('error', logging.NOTSET)
-        self.assertEqual(self.file_stream_logger.getEffectiveLevel(), logLevel)
-
-    def test_log_message(self):
-        update_config = {'log_file' : self.FILE_STREAM_LOG3.name,
-                         'log_severity' : 'critical',
-                         'log_versions' : 4,
-                         'log_max_bytes' : 1024}
-        self.file_stream_logger.update_config(update_config)
-        self.file_stream_logger.log_message('debug', 'debug message')
-        self.file_stream_logger.log_message('warning', 'warning message')
-        self.file_stream_logger.log_message('error', 'error message')
-        #test non-exist log level
-        self.assertRaises(None, self.file_stream_logger.log_message('not-exist', 'not exist message'))
-        #test log_message KeyError exception
-        self.assertRaises(None, self.file_stream_logger.log_message('critical', 'critical message', extra=['message', 'asctime']))
-        self.assertTrue(os.path.exists(self.FILE_STREAM_LOG3.name))
-    
-    def tearDown(self):
-        self.FILE_STREAM_LOG1.close()
-        self.FILE_STREAM_LOG2.close()
-        self.FILE_STREAM_LOG3.close()
-        sys.stderr.flush();
-        sys.stderr = self.stderr_bak
+        isc.log.init("root", "DEBUG", 50)
+        self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+
+    # Checks defaults of the logger
+    def defaults(self, logger):
+        self.assertEqual(logger.get_effective_severity(), "DEBUG")
+        self.assertEqual(logger.get_effective_debug_level(), 50)
+
+    def test_default_severity(self):
+        logger = isc.log.Logger("child")
+        self.defaults(logger)
+
+    # Try changing the severities a little bit
+    def test_severity(self):
+        logger = isc.log.Logger("child")
+        logger.set_severity('DEBUG', 25)
+        self.assertEqual(logger.get_effective_severity(), "DEBUG")
+        self.assertEqual(logger.get_effective_debug_level(), 25)
+        for sev in self.sevs:
+            logger.set_severity(sev)
+            self.assertEqual(logger.get_effective_severity(), sev)
+            self.assertEqual(logger.get_effective_debug_level(), 0)
+        # Return to default
+        logger.set_severity(None)
+        self.defaults(logger)
+
+    def test_enabled(self):
+        logger = isc.log.Logger("child")
+        self.sevs.insert(0, 'DEBUG')
+        methods = {
+            'DEBUG': logger.is_debug_enabled,
+            'INFO': logger.is_info_enabled,
+            'WARN': logger.is_warn_enabled,
+            'ERROR': logger.is_error_enabled,
+            'FATAL': logger.is_fatal_enabled
+        }
+        for sev in self.sevs:
+            logger.set_severity(sev)
+            enabled = False
+            for tested in self.sevs:
+                if tested == sev:
+                    enabled = True
+                self.assertEqual(methods[tested](), enabled)
+        logger.set_severity('DEBUG', 50)
+        self.assertTrue(logger.is_debug_enabled())
+        self.assertTrue(logger.is_debug_enabled(0))
+        self.assertTrue(logger.is_debug_enabled(50))
+        self.assertFalse(logger.is_debug_enabled(99))
+
+    def test_invalid_params(self):
+        """
+           Tests invalid arguments for logging functions. The output is tested
+           in check_output.sh.
+        """
+        logger = isc.log.Logger("child")
+        methods = [
+            logger.info,
+            logger.warn,
+            logger.error,
+            logger.fatal
+        ]
+        for meth in methods:
+            # Not enough arguments
+            self.assertRaises(TypeError, meth)
+            # Bad type
+            self.assertRaises(TypeError, meth, 1)
+        # Too few arguments
+        self.assertRaises(TypeError, logger.debug, 42)
+        self.assertRaises(TypeError, logger.debug)
+        # Bad type
+        self.assertRaises(TypeError, logger.debug, "42", "hello")
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/lib/python/isc/net/Makefile.am b/src/lib/python/isc/net/Makefile.am
index bb6057c..1b97614 100644
--- a/src/lib/python/isc/net/Makefile.am
+++ b/src/lib/python/isc/net/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = tests
 python_PYTHON = __init__.py addr.py parse.py
 
 pythondir = $(pyexecdir)/isc/net
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 73528d2..3a04f17 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = addr_test.py parse_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/notify/Makefile.am b/src/lib/python/isc/notify/Makefile.am
index f4a94fa..4081a17 100644
--- a/src/lib/python/isc/notify/Makefile.am
+++ b/src/lib/python/isc/notify/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
 python_PYTHON = __init__.py notify_out.py
 
 pythondir = $(pyexecdir)/isc/notify
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index a83ff86..1427d93 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index dd032fb..0b08257 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1 +1,6 @@
 EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/util/Makefile.am b/src/lib/python/isc/util/Makefile.am
index f6cbb78..140e221 100644
--- a/src/lib/python/isc/util/Makefile.am
+++ b/src/lib/python/isc/util/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
 python_PYTHON = __init__.py process.py socketserver_mixin.py file.py
 
 pythondir = $(pyexecdir)/isc/util
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index 0ce96de..c3d35c2 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = process_test.py socketserver_mixin_test.py file_test.py
 EXTRA_DIST = $(PYTESTS)
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
 	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/util/io/Makefile.am b/src/lib/util/io/Makefile.am
index b2653d8..cbcd54d 100644
--- a/src/lib/util/io/Makefile.am
+++ b/src/lib/util/io/Makefile.am
@@ -13,4 +13,6 @@ libutil_io_python_la_LDFLAGS = -module
 libutil_io_python_la_SOURCES = fdshare_python.cc
 libutil_io_python_la_LIBADD = libutil_io.la
 libutil_io_python_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
-libutil_io_python_la_CXXFLAGS = $(AM_CXXFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+libutil_io_python_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
diff --git a/src/lib/util/pyunittests/Makefile.am b/src/lib/util/pyunittests/Makefile.am
index e8fefbd..63ccf2a 100644
--- a/src/lib/util/pyunittests/Makefile.am
+++ b/src/lib/util/pyunittests/Makefile.am
@@ -6,6 +6,9 @@ pyexec_LTLIBRARIES = pyunittests_util.la
 pyunittests_util_la_SOURCES = pyunittests_util.cc
 pyunittests_util_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
 pyunittests_util_la_LDFLAGS = $(PYTHON_LDFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+pyunittests_util_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 
 # Python prefers .so, while some OSes (specifically MacOS) use a different
 # suffix for dynamic objects.  -module is necessary to work this around.




More information about the bind10-changes mailing list