BIND 10 trac930, updated. 732ee5d1ac0091aec99862b7b43ee62d9c84df45 [trac930] revise the entry of ChangeLog for trac928, trac929 and trac930

BIND 10 source code commits bind10-changes at lists.isc.org
Fri Aug 12 07:47:53 UTC 2011


The branch, trac930 has been updated
  discards  ced83a368040f132826daf0f8fc491e89f1fe904 (commit)
  discards  400d7bc238eab4335ab8ca7e54b6cd788848e61e (commit)
  discards  5438d88d968b32cdf6df2c1f0dfa5bd6d919bf51 (commit)
  discards  7d3137f7f0179d767797dd77e7e7b6315faac982 (commit)
  discards  a5f71cf67d49406cd72d6b9b7e62284e4d6f0698 (commit)
  discards  3c94190a8441d021d18ed9fb0a97ee6c33c06f5c (commit)
  discards  271d1a86256ea6d6a03e1f9a9eeca4ffd4184c25 (commit)
  discards  1f9dc923c37b9939541912477867ce738c7a852d (commit)
  discards  940f9cfe99ce2a2d18f3cae5cef1b7f06b7e022e (commit)
  discards  679886f108588b7920342e47c59278733c649832 (commit)
  discards  8a174c278e26fc3615e5d45ae787080c04214186 (commit)
  discards  1c95d795cc7ba5ccff3a1d7344e179f3703e06be (commit)
  discards  f59a43994babb4e45cc2c1d58970dda783b8cd70 (commit)
  discards  d425049d0bd79aaa9a684e093edf3c59cb76ec6b (commit)
  discards  3d20458b7286e7605796f118f9408600bc156f84 (commit)
  discards  3e0e82ee8847e58db70380ba8491934793ed6464 (commit)
  discards  ededbd7bee7b947a5c67f1182af9a7488ede5d34 (commit)
  discards  d47292ef388ba0277d373bb5c606a2bdba74fb60 (commit)
  discards  45cc7d7032342fdc3ccff275f43bd21fb73295ac (commit)
  discards  caa7d5289b0e13fc94a0c3590d80426400b0aecb (commit)
  discards  2347a66b1cf38ef9be15b426fb454905e10d2837 (commit)
  discards  78960d84a648d966137634f8c34e9907e6b26fbb (commit)
  discards  511907f71678284b24be5a26b14c9f4d88f5474e (commit)
  discards  b5951b9135524fb64f1a0c10012d7ccbbfb5327c (commit)
  discards  1c54118918e40ac3737296ad73ada450e377394b (commit)
  discards  d9574e5f5b892fc57a704875f6acc3e2dfd2e45c (commit)
  discards  c9ff40c5e3948e242daa95b71c8812083780ef85 (commit)
  discards  bdaa8a23fffcb134765a9ce111df9026b5fa2112 (commit)
  discards  6840ed899ff442f504cd4a874b6305d2fc3a83e1 (commit)
  discards  4034ce92cb567327bd22bf69797e9c85e739fc2a (commit)
  discards  15fa85f2568e8ec1c04660dd84b85456ef742a7e (commit)
  discards  e797a46534105e6e98f813bd43b8ac040db56ea3 (commit)
  discards  abba8ffdb5304af3ec00f738e4152c9fc6319d03 (commit)
  discards  0c9fe28c6fbc3e066ca429e13b639e9d37ea7763 (commit)
  discards  d5e6f38c8a91b51078981e10a41bc433109e28de (commit)
  discards  0c79a09760fc7609c27a2210a66769b567aabf16 (commit)
  discards  8b5d3e59e153d2be6aa078b836dc01b7f0a821e3 (commit)
  discards  575499fb47935be7aa7a9dec42f8dfbc82075f01 (commit)
  discards  4608c1375bff6bafc3525d4e2844573dd039cc44 (commit)
  discards  41a507fd2ec327a65f25980d72dd0e9ef6575310 (commit)
  discards  221277c960c36686233a2f4479eb387d66d69ab8 (commit)
  discards  e9620e0d9dd3d967bcfb99562f13848c70538a44 (commit)
  discards  4424c780b981979ec4e65ba96dd9da68fb0a1005 (commit)
  discards  e500ed2463dcccab836684879fb077db67bd418a (commit)
  discards  ca29cdacc5e89afb0315c5de967c16ea59caaca5 (commit)
  discards  4392d671c9a64b64631637da6bf1676b6f13dc10 (commit)
  discards  ce7cc078ead7a2b0de65aa8829f84b8595cacc3a (commit)
  discards  6cdc9fca6665ad64b94fe523df0411aeb35349a2 (commit)
  discards  d4b1a8f7d1e5d8a167d3da9ca66b6776d3b1610b (commit)
  discards  063b2043ae185b4a401b0ebe68c75d272d5338e3 (commit)
  discards  cb5968846c2729dce867cce411ebcd656332918c (commit)
  discards  8d5bb686ec68ef08d7e710ccaea29d26cad9db75 (commit)
  discards  1485c0e487c574f6ea129b227a72adff4a2cd829 (commit)
  discards  560e0556f6f67a093710cc4a1d6b5fec5b68fa57 (commit)
  discards  f3ac98b2976433a09664c427cfa3c18bb08bee0a (commit)
  discards  e15fda42ee91e72025bf48a74472f1b9e3e097f4 (commit)
  discards  60dec60829335a618f9b786ca1b5bba1234136da (commit)
       via  732ee5d1ac0091aec99862b7b43ee62d9c84df45 (commit)
       via  5a605b8e01e18c6ac3c309634d952c7058c2c414 (commit)
       via  b14b3742acdea7c8ad26321c49dcae6d547c0c6a (commit)
       via  db91d50b51e831b8388bbfd7233537a29d86dab2 (commit)
       via  df5cfbd3af5c3da48699367e00854b7831de16f1 (commit)
       via  9587813bd3757b16845c0c0902e101424e5eb8bd (commit)
       via  b9597508947bd57ef7fab55628cf97c6c5afcce8 (commit)
       via  0ebf17477af88cdd11029caeb485ceb105471457 (commit)
       via  cdbacefd925f5b0e3ffd51b1ad8f07d35089e673 (commit)
       via  31b4eb7e1e8e348ba0a31354ca788e44538164c9 (commit)
       via  2afe6b613b4965eb0ff3f8deb6e250bd77fa9324 (commit)
       via  75163a8bf9838ed4109d5ba766f17fbc736f1ac2 (commit)
       via  1af449cb9ce4df5d429b6f700b08bfa38ce06236 (commit)
       via  aa11de71155b006c19e299c185afce675c452749 (commit)
       via  ca409a7d71b91fb33d42a5d1a0cf5ab1ea2bb491 (commit)
       via  312653dfd3d6d26a07b80895f7292b4d1c318d19 (commit)
       via  f77f32a6085905778586bd43b1cc78fc7b3ffa1d (commit)
       via  926497e73b0ad8f2508fc9bfa3868f04fd7b05e8 (commit)
       via  b7c9f192457178cfff64072ed8f0bea01d416c83 (commit)
       via  2aa136505584811bce8260ee2ee17c9c20703cb9 (commit)
       via  63bff4c4a6dc5b32fa53c82db128e04024b65a0f (commit)
       via  09468a691a66c7d9d3b6d7a055415370741f102b (commit)
       via  2067eda9635f92312e5d8d7251cff2b354329a4c (commit)
       via  9398673539d34182bd154cdd18cb211b6ba96fde (commit)
       via  3b04ea8924dc6ae5f4993060e32f67c2d3e0d959 (commit)
       via  acfd5206272b4a6c062a72e6dc6176e518557ba5 (commit)
       via  f6e79b1390bd1c530a654c150caadf885293b02c (commit)
       via  07f9fea30ce4f11d57c04f619da42441b93e9706 (commit)
       via  a2a36de8522a4c47df9b98847e9a9273224e1c4e (commit)
       via  d37222088c0cefe8a5fb314717569741bae76b93 (commit)
       via  f44f640bb841376070e92bd2106b339a057fc2a6 (commit)
       via  e585e49e1ae6a56cbfbfd97bbbbd7b889c71d066 (commit)
       via  dbcb38f1431cfc470a397cf63b2a3d8d4d1c4165 (commit)
       via  b86acab9caa3ceebd12cf96e64d00bfcda10324b (commit)
       via  ce38c4520c011e6c81bb4882eea1167a85d501e3 (commit)
       via  96874e7eeb545e9f63e27edf7a604185cc28c306 (commit)
       via  cce58c235847c36a614edd4b1bfe74f8fa166dda (commit)
       via  f83b10d2b5347113aa3b9b6f535b70085b2259d4 (commit)
       via  ed0a306cd7adfa30e570c0bc17c69dccf01157a1 (commit)
       via  7c989a9efe8b25cb608dc92e33eb803bd682736b (commit)
       via  e98030dfd393fe9fa73bde31172a24fdfe492084 (commit)
       via  6dc5afaadd699a8d7aeba332a06d6da796c4c695 (commit)
       via  1ee0777744051fea91a49d0d1cce827c7f162002 (commit)
       via  827c461a9717bef527c678caedae7de6e61cf772 (commit)
       via  430b763b2a3cc3d902fa91fb4406ae400c0a4f4d (commit)
       via  dcf0ccfad06022738086cd8390b7cd5f6eb1fc45 (commit)
       via  88c497de1d7191df88594a7143adaa179a52d8ee (commit)
       via  c257aa7ebefbf0fd8149cba943b9a45928272322 (commit)
       via  49a65fe7a00e41b408fa768e0d6991e7d8815cf0 (commit)
       via  6045a682cf028cd06b9915502354310a46c8fcde (commit)
       via  711df35ae8a4a46f8b051aec49cf5df8845eb8a0 (commit)
       via  fc4895c406740f5a19a76861b5620c93dbf72a38 (commit)
       via  0ee7f1aa1560c6564a5fb3a5047490b8d94c8fe0 (commit)
       via  fbd4c644015d80866951e51a5623f47b6c39fb0c (commit)
       via  84f900a5bb31a4786aac2a5b9a107ad93f4e2ace (commit)
       via  e13baa92edabf6a2a069d99a306efd79b21a29d4 (commit)
       via  7f9ef1c58d82ece7e9650d7b713dff766b688f3d (commit)
       via  977f822d94c59bfd9d56373404291fc85218b1d6 (commit)
       via  d00042b03e1f85cd1d8ea8340d5ac72222e5123e (commit)
       via  0081ce40b832f4c5abaeb0316736d772aec3f08d (commit)
       via  f03688da19c21b4d46761cc4ed9da981cebe43c1 (commit)
       via  eb8ba927115b091bb407cbc29ad2d07dfed318f1 (commit)
       via  1aa26c98d1b827a80bad8abd7f8bb25c26db72b7 (commit)
       via  20483389cb90e4f46486be925b896c8a0438191c (commit)
       via  65e4595c21bf9c01fb0b7da61577ae8a79d29c30 (commit)
       via  10b192574ca253331298bbc4b05ef70d2cb927d1 (commit)
       via  97153d16eb9ecb7281ed9dc76783091964e769dd (commit)
       via  56083614ae0e8c5177786528e85d348686bf9bc2 (commit)
       via  3027ed2010e5e27ef6e8ba519b789269100f442e (commit)
       via  fc33ec0a47dce3e94fa7179d4d28d7fd050a258d (commit)
       via  9a98be99edd71e540bd65631dcbd3d766f93056e (commit)
       via  885d7987eefb0b8b694626b0831ed93123fb8d8d (commit)
       via  82348d8d9d266d91e570c4ae8d8f1afd3315178a (commit)
       via  ee2a86bd4c1472e606b3d59ef5c4392b61d7ab48 (commit)
       via  0e662967ac5a6c8e187725828cd20b826ca00000 (commit)
       via  dc979c6874916221df10de3557db0d1b4a19d221 (commit)
       via  925045f2ad19d5dccb7dde77530ea16ea7b6341b (commit)
       via  ba80991049e1e361d2b1de08160c91e5bd38b728 (commit)
       via  faa90e91384af409419363aca539709e2985708b (commit)
       via  1feeca7c2209819dd181f1fbaaa75026d3e38aa2 (commit)
       via  d7713e5c5033ccb0b51769d7f28d91619655b24d (commit)
       via  928dacfdf443393618edf7124a46c599bd760784 (commit)
       via  b34e7172b5f663faf3add7f6e72a3e2d8ffe680a (commit)
       via  7fbc6a734b2a9e33100e57cbea0ce1d20cdf4491 (commit)
       via  9f5c36321d6843ba5b2a0e9e6c10c3ffee7b14fc (commit)
       via  fea1f88cd0bb5bdeefc6048b122da4328635163d (commit)
       via  54ef8963e504e22dcf29405412a95100a210efe5 (commit)
       via  0b98878ed8a185cbc3b78c860019416bfed317bb (commit)
       via  98953e35ee95489f01fbe87e55fe91d9571fcb48 (commit)
       via  747d2952c78ee32acc485946d3922cfe899a4b48 (commit)
       via  f26298e3ae274ccea3d4bcef37f5ac85da383461 (commit)
       via  7489fa475c3f5963323a6b660e4544e48f45d37c (commit)
       via  f00712037fa4b4cbd0d677d998df3728c0c4d8fe (commit)
       via  dae8a2aabc0cc9c9f3794276676872014c5a58fa (commit)
       via  3cebb4e77088feb357b485aeeda26429f98dce9b (commit)
       via  96249117c97e625ec93d94939e9d75fad18ac2df (commit)
       via  dfc13c8130787ee07e2386773a221524ac6d802b (commit)
       via  6ee994f190d58df863c71389bf9f8edd38d8e3eb (commit)
       via  f240d7d1d55f4ae87bfd1acc9c07a90870f59a93 (commit)
       via  1c5a66507b7dc2990709308979354d8e62646a28 (commit)
       via  19912ea4537e669f9c9ad1108b6f5453025738ef (commit)
       via  3702df52de21023d90052afdc54732d9ad285b39 (commit)
       via  e47f04584b00f6d7b5c8bf9e8ae6af9aaa6831fd (commit)
       via  823e0fcf308c7f3fc88ba48070e12bd995e75392 (commit)
       via  608d45610e9f499fb43d2e52eba461d489a7d45f (commit)
       via  c42eef08cd6cb28c898d46c2168c5c08684d5c36 (commit)
       via  e76dc86b0a01a54dab56cbf8552bd0c5fbb5b461 (commit)
       via  e62e50f3143aa67bd60c2351ad61d7544f28d4ca (commit)
       via  be9d5fe994e6a086a951e432d56e7de2af3cfd09 (commit)
       via  11b8b873e7fd6722053aa224d20f29350bf2b298 (commit)
       via  b63b9aac20259f3612e23c7a3e977dcb48693ef1 (commit)
       via  14a0766224d50d1c4c409e883cf29515dafc25f0 (commit)
       via  b5fbd9c942b1080aa60a48ee23da60574d1fc22f (commit)
       via  d299036c6ac281d1d6c119c5fdbe603bed404851 (commit)
       via  e5d9f259dce621201a2c52b56b260f8de776ecc0 (commit)
       via  f773f9ac21221663bd093806374cab83abd2288d (commit)
       via  63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
       via  579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)

This update added new revisions after undoing existing revisions.  That is
to say, the old revision is not a strict subset of the new revision.  This
situation occurs when you --force push a change and generate a repository
containing something like this:

 * -- * -- B -- O -- O -- O (ced83a368040f132826daf0f8fc491e89f1fe904)
            \
             N -- N -- N (732ee5d1ac0091aec99862b7b43ee62d9c84df45)

When this happens we assume that you've already had alert emails for all
of the O revisions, and so we here report only the revisions in the N
branch from the common base, B.

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 732ee5d1ac0091aec99862b7b43ee62d9c84df45
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Aug 9 15:57:22 2011 +0900

    [trac930] revise the entry of ChangeLog for trac928, trac929 and trac930

commit 5a605b8e01e18c6ac3c309634d952c7058c2c414
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Aug 5 16:24:03 2011 +0900

    [trac930]
      - revise header comments in each test script
      - replace some hard-coded time strings with the constants defined in the
        setUp function
      - merge several checks of B10_FROM_SOURCE into the TestOSEnv class

commit b14b3742acdea7c8ad26321c49dcae6d547c0c6a
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Aug 5 14:48:27 2011 +0900

    [trac930]
      - change the test address to 127.0.0.1 because 127.0.0.2 cannot be
        assigned on some platforms
      - remove unnecessary thread.Event.wait()
      - add thread.Event.clear() after thread.Event.wait()

commit db91d50b51e831b8388bbfd7233537a29d86dab2
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Aug 3 11:41:05 2011 +0900

    [trac930] refactor unittests
     - remove time.sleep from various unittests and add it to the "run" method
       of ThreadingServerManager
     - adjust the sleep time (TIMEOUT_SEC)
     - join some small unittests
       (test_start_with_err, test_command_status, test_command_shutdown)

commit df5cfbd3af5c3da48699367e00854b7831de16f1
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Aug 2 22:00:11 2011 +0900

    [trac930] add comments summarizing each test script in its header

commit 9587813bd3757b16845c0c0902e101424e5eb8bd
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Aug 2 21:44:07 2011 +0900

    [trac930] modify stats.py
     - add more documentation to the update_modules, get_statistics_data and
       update_statistics_data methods
     - modify two methods: "update_modules" and "get_statistics_data" now raise
       StatsError instead of just returning None when communication between the
       stats module and the config manager fails or when the specified
       statistics data cannot be found.
     - also update the unittests to match these behavior changes.
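
For illustration, a minimal Python sketch of the raise-instead-of-return-None pattern described in this commit; the class layout and messages are assumptions, not the actual stats.py code:

    class StatsError(Exception):
        """Raised when statistics data cannot be obtained or is invalid."""

    class Stats:
        def __init__(self):
            # statistics_data maps an owner module name to {item name: value}
            self.statistics_data = {}

        def get_statistics_data(self, owner=None, name=None):
            # Raise StatsError instead of returning None when the requested
            # owner or item does not exist.
            if owner is None:
                return self.statistics_data
            try:
                data = self.statistics_data[owner]
            except KeyError:
                raise StatsError("unknown module name: " + str(owner))
            if name is None:
                return data
            if name not in data:
                raise StatsError("unknown item name: " + str(name))
            return {name: data[name]}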

commit b9597508947bd57ef7fab55628cf97c6c5afcce8
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Aug 2 20:17:28 2011 +0900

    [trac930] modify b10-stats_test.py
     - set the constant variables in the setUp method of the TestUtilties class,
       and compare the values returned from the functions against these constants
       in the test methods.
    
    [trac930] remove the tearDown method, which has no test case, from the
              TestCallback class

commit 0ebf17477af88cdd11029caeb485ceb105471457
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Aug 2 19:57:58 2011 +0900

    [trac930] remove trailing whitespace.

commit cdbacefd925f5b0e3ffd51b1ad8f07d35089e673
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Mon Aug 1 18:38:35 2011 +0900

    [trac930] raise StatsError when the stats spec file contains errors

commit 31b4eb7e1e8e348ba0a31354ca788e44538164c9
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Mon Aug 1 18:21:23 2011 +0900

    [trac930] rename the function
     - rename 'parse_spec' to 'get_spec_defaults' to better reflect what it does
     - update the function's docstring accordingly
     - fix the stats module unittests that depend on the function name
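
As an illustration only, a small sketch of what a get_spec_defaults-style helper can do (derive a default value for each item in a spec list); the spec layout and key names here are assumptions, not the code on this branch:

    def get_spec_defaults(spec):
        # Hypothetical sketch: return {item name: default value} for a list of
        # item specs, honoring "item_default" when present.
        if spec is None:
            return {}
        def default_of(item):
            if 'item_default' in item:
                return item['item_default']
            # fall back to a zero-ish value per item type
            return {'integer': 0, 'real': 0.0, 'boolean': False,
                    'string': '', 'list': [], 'map': {}}.get(item.get('item_type'))
        return dict((item['item_name'], default_of(item)) for item in spec)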

commit 2afe6b613b4965eb0ff3f8deb6e250bd77fa9324
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 29 22:11:38 2011 +0900

    [trac930] remove an unnecessary executable bit from stats_httpd.py.in

commit 75163a8bf9838ed4109d5ba766f17fbc736f1ac2
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Thu Jul 28 22:07:15 2011 +0900

    [trac930] modify logging
    add logging calls and new log messages
    remove unused messages from the message file
    add test logger names to the unittest scripts

commit 1af449cb9ce4df5d429b6f700b08bfa38ce06236
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 27 20:45:18 2011 +0900

    [trac930] modify the update_modules function
    A module that has no statistics data has no statistics category in its spec
    file.

commit aa11de71155b006c19e299c185afce675c452749
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 27 16:49:21 2011 +0900

    [trac930]
     - correct the error messages in bindctl
       they are now printed together with the arguments.
     - modify the command_show function
       it reports the module's statistics data even if no item name is specified.
     - add/modify unittests to follow the error message changes

commit ca409a7d71b91fb33d42a5d1a0cf5ab1ea2bb491
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 27 16:42:54 2011 +0900

    [trac930] remove an unnecessary white space

commit 312653dfd3d6d26a07b80895f7292b4d1c318d19
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 27 10:18:07 2011 +0900

    [trac930] add a test case in which the set command is sent with a non-existent
    item name

commit f77f32a6085905778586bd43b1cc78fc7b3ffa1d
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 27 10:14:57 2011 +0900

    [trac930] modify the parse_spec function
    return an empty dict if list-type is not specified in the argument

commit 926497e73b0ad8f2508fc9bfa3868f04fd7b05e8
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 21:40:07 2011 +0900

    [trac930] fix conflicts with trac1021

commit b7c9f192457178cfff64072ed8f0bea01d416c83
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:50:41 2011 +0900

    [trac930] adapt to the renamed query counter names described in the spec file.

commit 2aa136505584811bce8260ee2ee17c9c20703cb9
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:45:19 2011 +0900

    [trac930] add logging for when the validation of statistics data fails

commit 63bff4c4a6dc5b32fa53c82db128e04024b65a0f
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:43:26 2011 +0900

    [trac930] Add unittests for submitStatistics with validation of statistics data, and add a mock ModuleSpec class

commit 09468a691a66c7d9d3b6d7a055415370741f102b
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:41:34 2011 +0900

    [trac930] Add prototypes of validator_type and registerStatisticsValidator
     - validator_type -- a type of statistics validation function
     - registerStatisticsValidator -- the function to register the validation function

commit 2067eda9635f92312e5d8d7251cff2b354329a4c
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:37:22 2011 +0900

    [trac930]
    - Add an implementation to validate statistics data
      -- When validation succeeds, the data is sent to the statistics module; when
         it fails, the data is not sent and a message is logged.
    
    - Add a function to register the validation function with the class
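
A minimal sketch of the register-then-validate-before-send pattern described above; the class and method names are placeholders, not the code added by this commit:

    import logging

    class StatsSender:
        """Hypothetical sketch: register a validator and check statistics
        data before sending it."""

        def __init__(self):
            self._validator = None
            self._log = logging.getLogger("stats-sender")

        def register_statistics_validator(self, func):
            # func(data) -> bool; consulted before every send
            self._validator = func

        def send_statistics(self, data, send):
            # 'send' is the callable that actually ships the data
            if self._validator is not None and not self._validator(data):
                self._log.error("statistics data validation failed: %r", data)
                return False
            send(data)
            return True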

commit 9398673539d34182bd154cdd18cb211b6ba96fde
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:32:22 2011 +0900

    [trac930] add the helper functions used for registering the statistics data validation function.

commit 3b04ea8924dc6ae5f4993060e32f67c2d3e0d959
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 22 18:28:40 2011 +0900

    [trac930] add new messages to the Auth and Boss message files
    for when validation of statistics data to be sent to the stats module fails.

commit acfd5206272b4a6c062a72e6dc6176e518557ba5
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 20 10:00:29 2011 +0900

    [trac930] add statistics validation for bob

commit f6e79b1390bd1c530a654c150caadf885293b02c
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 13 20:25:54 2011 +0900

    [trac930]
     - increase the sleep time before the HTTP client connects to the server
     - delete 'test_log_message' because the original function was deleted

commit 07f9fea30ce4f11d57c04f619da42441b93e9706
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 21:22:34 2011 +0900

    [trac930] remove unneeded empty TODO comments

commit a2a36de8522a4c47df9b98847e9a9273224e1c4e
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 21:09:41 2011 +0900

    [trac930] add new entry for #928-#930

commit d37222088c0cefe8a5fb314717569741bae76b93
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 20:08:22 2011 +0900

    [trac930] refurbish the unittests for the new stats module and the new stats
    httpd module, and the new mockups and utilities in test_utils.py

commit f44f640bb841376070e92bd2106b339a057fc2a6
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 19:56:24 2011 +0900

    [trac930] modify Stats
     - remove unneeded subject and listener classes
    
     - add StatsError for handling errors in Stats
    
     - add some new methods (update_modules, update_statistics_data and
       get_statistics_data)
    
     - modify the implementations of the existing commands (show and set)
       according to the changes in stats.spec
    
     - remove the reset and remove commands because the stats module cannot manage
       other modules' statistics data schemas
    
     - add strict validation of each piece of statistics data
       (If validation fails, an error is emitted.)
    
     - the stats module shows its PID when the status command is invoked
    
     - add a new showschema command invokable via bindctl
    
     - the set command requires the owner module name and the statistics item name
       as arguments
    
     - the show and showschema commands accept the owner module name and the
       statistics item name as arguments
    
     - exit with exit code 1 on runtime errors
    
     - keep the boot time in _BASETIME
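
For orientation, a rough Python sketch of the show/showschema distinction described above (current values versus the declared schema); the data layout and names are assumptions, not the actual Stats code:

    def command_showschema(schema_by_module, owner=None, name=None):
        # Hypothetical sketch: 'schema_by_module' maps a module name to the
        # list of item specs ({"item_name": ..., "item_type": ...}) it declared.
        if owner is None:
            return schema_by_module
        items = schema_by_module.get(owner)
        if items is None:
            raise ValueError("unknown module name: " + owner)
        if name is None:
            return {owner: items}
        matched = [item for item in items if item.get("item_name") == name]
        if not matched:
            raise ValueError("unknown item name: " + name)
        return {owner: matched}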

commit e585e49e1ae6a56cbfbfd97bbbbd7b889c71d066
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 19:40:15 2011 +0900

    [trac930]
     - remove "stats-schema.spec" setting and getting statistics data schema via
       this spec file
    
     - add "version" item in DEFAULT_CONFIG
    
     - get the address family in advance with the socket.getaddrinfo function for
       the specified server_address, and create a single HttpServer object instead
       of creating two HttpServer objects for IPv6 and IPv4 as in the prior code
       (this avoids failing to close sockets that were already opened)
    
     - open HTTP port in start method
    
     - avoid calling config_handler recursively in the except statement
    
     - create the XML, XSD and XSL documents after getting the statistics data and
       schema from the remote stats module via the CC session
    
     - always close the template file object once it has been opened
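
A small illustrative sketch of the getaddrinfo-then-single-server idea mentioned above, assuming Python's standard http.server rather than the actual stats_httpd.py classes:

    import socket
    from http.server import HTTPServer, BaseHTTPRequestHandler

    def make_http_server(address, port):
        # Resolve the address family up front so that a single server object
        # is created, whether the address is IPv4 or IPv6.
        family = socket.getaddrinfo(address, port, 0, socket.SOCK_STREAM)[0][0]

        class Server(HTTPServer):
            address_family = family

        return Server((address, port), BaseHTTPRequestHandler)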

commit dbcb38f1431cfc470a397cf63b2a3d8d4d1c4165
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 16:33:59 2011 +0900

    [trac930] update the spec file of the stats module
    - update the descriptions of the status, shutdown and show commands
    - change the show command's argument (the owner module name of the statistics
    data can be specified)
    - change the set command's argument (the owner module name of the statistics
    data is always required)
    - add a showschema command which shows the statistics data schema of each
    specified module
    - disable the reset and remove commands

commit b86acab9caa3ceebd12cf96e64d00bfcda10324b
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 16:21:49 2011 +0900

    [trac930] update the argument name and format of the set command in the auth
    and boss modules, and also update the related unittests of those modules

commit ce38c4520c011e6c81bb4882eea1167a85d501e3
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 16:18:38 2011 +0900

    [trac930] remove the description about the stats module removing statistics
    data; update the example output format shown in bindctl when the stats
    module's show command is invoked

commit 96874e7eeb545e9f63e27edf7a604185cc28c306
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 16:13:17 2011 +0900

    [trac930] add an "Owner" column to the table tag

commit cce58c235847c36a614edd4b1bfe74f8fa166dda
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 16:12:09 2011 +0900

    [trac930] remove descriptions of "stats-schema.spec" and describe the new
    feature: the stats module can now be asked to show the statistics data schema.

commit f83b10d2b5347113aa3b9b6f535b70085b2259d4
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 16:00:30 2011 +0900

    [trac930] add utilities and mock-up modules for the unittests of the
    statistics modules and change some environment variables (PYTHONPATH,
    CONFIG_TESTDATA_PATH) in the Makefile
    
    test_utils.py internally runs msgq, cfgmgr and some mock modules in
    threads to make the test situation as realistic as possible.

commit ed0a306cd7adfa30e570c0bc17c69dccf01157a1
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:57:41 2011 +0900

    [trac930] remove unneeded mockups, fake modules and dummy data

commit 7c989a9efe8b25cb608dc92e33eb803bd682736b
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:55:55 2011 +0900

    [trac930] remove unneeded specfile "stats-schema.spec"

commit e98030dfd393fe9fa73bde31172a24fdfe492084
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Aug 9 15:53:56 2011 +0900

    [trac929]
     guard against buffer overflow
      - use std::vector<char> instead of char[]
      - use strncmp() instead of strcmp()
      - shorten length of char array for the buffer
        (not directly related to buffer overflow)
    
     add more unittests for some malformed type formats to both the C++ and Python
     code (unittests for the previous change, git e9620e0d9dd3d967bcfb99562f13848c70538a44)
      - date-time-type format not ending with "Z"
      - date-type format ending with "T"
      - time-type format ending with "Z"

commit 6dc5afaadd699a8d7aeba332a06d6da796c4c695
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Aug 5 14:22:28 2011 +0900

    [trac929] add a stricter check for the date and time formats (add a reverse check)

commit 1ee0777744051fea91a49d0d1cce827c7f162002
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 29 20:07:23 2011 +0900

    [trac929] implement some config_data-checking methods in the mock of
    module_spec, by simply copying those methods from the original module_spec
    into the mock.

commit 827c461a9717bef527c678caedae7de6e61cf772
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 29 19:59:08 2011 +0900

    [trac929] fix the invalid spec file for module_spec.py
    remove format_type, which is undefined in module_spec.py, from stats-schema.spec

commit 430b763b2a3cc3d902fa91fb4406ae400c0a4f4d
Author: Jelte Jansen <jelte at isc.org>
Date:   Wed Jul 27 18:32:06 2011 +0200

    [trac929] replace != None with is not None

commit dcf0ccfad06022738086cd8390b7cd5f6eb1fc45
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Jul 26 18:04:43 2011 +0900

    [trac929] use the exact way of checking whether the value is "None" in the
    "if" branch, as pointed out in the review.

commit 88c497de1d7191df88594a7143adaa179a52d8ee
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Jul 19 21:02:12 2011 +0900

    [trac929]
    add unit tests of the following methods, using dummy spec files with dummy statistics specification data
     - getStatisticsSpec()
     - validateStatistics()
     - validateStatistics()
     - check_format()
    
    add include of boost_foreach
    update the year of copyright

commit c257aa7ebefbf0fd8149cba943b9a45928272322
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Jul 19 20:57:32 2011 +0900

    [trac929]
    add some methods for statistics specification
     - getStatisticsSpec()
     - validateStatistics()
     - validateStatistics()
    
    update the year of copyright

commit 49a65fe7a00e41b408fa768e0d6991e7d8815cf0
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Tue Jul 19 20:55:39 2011 +0900

    [trac929]
    additions and modifications mirroring the statistics part of module_spec.py
     - add check_format, which checks whether the given element is a valid
       statistics specification
     - modify check_data_specification to also check the statistics specification
     - add getStatisticsSpec(), which returns the statistics specification
     - add two validateStatistics overloads, which check whether the specified
       data is valid for the statistics specification
     - modify validateItem to also check item_format in the specification
    
    update the year of copyright
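
To make the item_format checks concrete, a Python-flavored sketch of a check_format-style helper; the regular expressions are illustrative assumptions based on the date/time formats mentioned in the trac929 log above, not the module_spec code itself:

    import re

    # Illustrative patterns: a full date-time must end with "Z", a bare date
    # carries no "T", and a bare time carries no trailing "Z".
    _FORMATS = {
        'date-time': re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$'),
        'date':      re.compile(r'^\d{4}-\d{2}-\d{2}$'),
        'time':      re.compile(r'^\d{2}:\d{2}:\d{2}$'),
    }

    def check_format(value, item_format):
        """Return True if 'value' matches the named item_format."""
        pattern = _FORMATS.get(item_format)
        if pattern is None:
            return False
        return pattern.match(value) is not None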

commit 6045a682cf028cd06b9915502354310a46c8fcde
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:47:09 2011 +0900

    [trac929] add unittest of "get_statistics_spec"

commit 711df35ae8a4a46f8b051aec49cf5df8845eb8a0
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:46:57 2011 +0900

    [trac929] add unittests for the functions:
     - validate_format
     - check_format
     - validate_format

commit fc4895c406740f5a19a76861b5620c93dbf72a38
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:46:46 2011 +0900

    [trac929] add "validate_statistics" which validates statistics specification in the spec file
    It checks the data types and data formats of the statistics specification

commit 0ee7f1aa1560c6564a5fb3a5047490b8d94c8fe0
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:46:27 2011 +0900

    [trac929] add "get_statistics_spec" into cfgmgr.py
    it collects the contents of the statistics category from each spec file.
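
A rough sketch of the idea of collecting each module's statistics category; the data layout is an assumption, not the actual cfgmgr.py code:

    def get_statistics_spec(module_specs):
        # Hypothetical sketch: 'module_specs' maps a module name to its parsed
        # spec (a dict); return only the "statistics" category of each one.
        result = {}
        for name, spec in module_specs.items():
            statistics = spec.get("statistics")
            if statistics is not None:
                result[name] = statistics
        return result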

commit fbd4c644015d80866951e51a5623f47b6c39fb0c
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:45:28 2011 +0900

    [trac929] add COMMAND_GET_STATISTICS_SPEC for "get_statistics_spec"

commit 84f900a5bb31a4786aac2a5b9a107ad93f4e2ace
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:45:15 2011 +0900

    [trac929] add a statistics category to "spec2.spec"
    and modify the message string compared against in EXPECT_EQ

commit e13baa92edabf6a2a069d99a306efd79b21a29d4
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Fri Jul 8 15:44:40 2011 +0900

    [trac929] add some spec files for unittest of statistics category

commit 7f9ef1c58d82ece7e9650d7b713dff766b688f3d
Author: Naoki Kambe <kambe at jprs.co.jp>
Date:   Wed Jul 20 09:00:53 2011 +0900

    [trac928] add a statistics category and statistics items to some spec files (bob.spec, auth.spec, stats.spec)

-----------------------------------------------------------------------

Summary of changes:
 README                                             |    6 +-
 configure.ac                                       |   10 +-
 doc/guide/bind10-guide.xml                         |   10 +
 src/bin/auth/Makefile.am                           |    1 +
 src/bin/auth/auth_messages.mes                     |   10 +-
 src/bin/auth/benchmarks/Makefile.am                |    1 +
 src/bin/auth/tests/Makefile.am                     |    1 +
 src/bin/auth/tests/query_unittest.cc               |    4 +-
 src/bin/auth/tests/testdata/Makefile.am            |    2 +-
 src/bin/bind10/Makefile.am                         |    6 +-
 src/bin/bind10/bind10_messages.mes                 |    2 +-
 src/bin/bind10/bind10_src.py.in                    |    6 +-
 src/bin/bind10/run_bind10.sh.in                    |    2 +-
 src/bin/bind10/tests/Makefile.am                   |    2 +-
 src/bin/cmdctl/cmdctl_messages.mes                 |    2 +-
 src/bin/dhcp6/Makefile.am                          |    1 +
 src/bin/host/Makefile.am                           |    1 +
 src/bin/resolver/resolver_messages.mes             |    4 +-
 src/bin/stats/stats_httpd_messages.mes             |   12 +-
 src/bin/tests/Makefile.am                          |    1 +
 src/bin/tests/process_rename_test.py.in            |    9 +-
 src/bin/xfrout/xfrout_messages.mes                 |    2 +-
 src/bin/zonemgr/Makefile.am                        |   10 +-
 src/bin/zonemgr/zonemgr.py.in                      |   79 +-
 src/bin/zonemgr/zonemgr_messages.mes               |  145 +++
 src/lib/asiodns/asiodns_messages.mes               |   10 +-
 src/lib/bench/tests/Makefile.am                    |    1 +
 src/lib/cache/tests/Makefile.am                    |    1 +
 src/lib/cc/cc_messages.mes                         |    6 +-
 src/lib/cc/session.cc                              |    2 +-
 src/lib/datasrc/Makefile.am                        |    2 +
 src/lib/datasrc/client.h                           |    2 +
 src/lib/datasrc/database.cc                        |   87 ++
 src/lib/datasrc/database.h                         |  187 +++
 src/lib/datasrc/datasrc_messages.mes               |   14 +-
 src/lib/datasrc/memory_datasrc.cc                  |    4 +-
 src/lib/datasrc/memory_datasrc.h                   |    4 +-
 src/lib/datasrc/sqlite3_accessor.cc                |  322 ++++++
 src/lib/datasrc/sqlite3_accessor.h                 |  105 ++
 src/lib/datasrc/tests/Makefile.am                  |    3 +
 src/lib/datasrc/tests/database_unittest.cc         |   99 ++
 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc |  103 ++
 src/lib/datasrc/zone.h                             |    4 +-
 src/lib/dns/benchmarks/Makefile.am                 |    1 +
 src/lib/dns/rdata/any_255/tsig_250.cc              |  126 +--
 src/lib/dns/rdata/in_1/srv_33.cc                   |   58 +-
 src/lib/dns/tests/testdata/Makefile.am             |    5 +-
 src/lib/dns/tests/testdata/gen-wiredata.py.in      |  610 ----------
 src/lib/python/isc/Makefile.am                     |    2 +-
 src/lib/python/isc/bind10/Makefile.am              |    4 +
 src/{bin => lib/python/isc}/bind10/__init__.py     |    0 
 src/{bin => lib/python/isc}/bind10/sockcreator.py  |    0 
 .../python/isc}/bind10/tests/Makefile.am           |    7 +-
 .../python/isc/bind10/tests/sockcreator_test.py}   |    2 +-
 src/lib/python/isc/config/ccsession.py             |    4 +-
 src/lib/python/isc/config/tests/ccsession_test.py  |    2 +
 src/lib/python/isc/notify/notify_out_messages.mes  |    2 +-
 src/lib/resolve/tests/Makefile.am                  |    1 +
 src/lib/testutils/testdata/Makefile.am             |    2 +-
 src/lib/util/Makefile.am                           |    2 +-
 src/lib/util/python/Makefile.am                    |    1 +
 src/lib/util/python/gen_wiredata.py.in             | 1189 ++++++++++++++++++++
 src/lib/util/strutil.cc                            |   11 +
 src/lib/util/strutil.h                             |   62 +
 src/lib/util/tests/strutil_unittest.cc             |   80 ++-
 65 files changed, 2612 insertions(+), 844 deletions(-)
 create mode 100644 src/bin/zonemgr/zonemgr_messages.mes
 create mode 100644 src/lib/datasrc/database.cc
 create mode 100644 src/lib/datasrc/database.h
 create mode 100644 src/lib/datasrc/sqlite3_accessor.cc
 create mode 100644 src/lib/datasrc/sqlite3_accessor.h
 create mode 100644 src/lib/datasrc/tests/database_unittest.cc
 create mode 100644 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
 delete mode 100755 src/lib/dns/tests/testdata/gen-wiredata.py.in
 create mode 100644 src/lib/python/isc/bind10/Makefile.am
 rename src/{bin => lib/python/isc}/bind10/__init__.py (100%)
 rename src/{bin => lib/python/isc}/bind10/sockcreator.py (100%)
 copy src/{bin => lib/python/isc}/bind10/tests/Makefile.am (91%)
 rename src/{bin/bind10/tests/sockcreator_test.py.in => lib/python/isc/bind10/tests/sockcreator_test.py} (99%)
 create mode 100644 src/lib/util/python/Makefile.am
 create mode 100755 src/lib/util/python/gen_wiredata.py.in

-----------------------------------------------------------------------
diff --git a/README b/README
index a6509da..a9e05f1 100644
--- a/README
+++ b/README
@@ -8,10 +8,10 @@ for serving, maintaining, and developing DNS.
 BIND10-devel is new development leading up to the production
 BIND 10 release. It contains prototype code and experimental
 interfaces. Nevertheless it is ready to use now for testing the
-new BIND 10 infrastructure ideas. The Year 2 milestones of the
-five year plan are described here:
+new BIND 10 infrastructure ideas. The Year 3 goals of the five
+year plan are described here:
 
-	https://bind10.isc.org/wiki/Year2Milestones
+	http://bind10.isc.org/wiki/Year3Goals
 
 This release includes the bind10 master process, b10-msgq message
 bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
diff --git a/configure.ac b/configure.ac
index 85602ca..ee990eb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20110519, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20110809, bind10-dev at isc.org)
 AC_CONFIG_SRCDIR(README)
 AM_INIT_AUTOMAKE
 AC_CONFIG_HEADERS([config.h])
@@ -832,6 +832,8 @@ AC_CONFIG_FILES([Makefile
                  src/lib/python/isc/notify/Makefile
                  src/lib/python/isc/notify/tests/Makefile
                  src/lib/python/isc/testutils/Makefile
+                 src/lib/python/isc/bind10/Makefile
+                 src/lib/python/isc/bind10/tests/Makefile
                  src/lib/config/Makefile
                  src/lib/config/tests/Makefile
                  src/lib/config/tests/testdata/Makefile
@@ -864,6 +866,7 @@ AC_CONFIG_FILES([Makefile
                  src/lib/util/Makefile
                  src/lib/util/io/Makefile
                  src/lib/util/unittests/Makefile
+                 src/lib/util/python/Makefile
                  src/lib/util/pyunittests/Makefile
                  src/lib/util/tests/Makefile
                  src/lib/acl/Makefile
@@ -900,7 +903,6 @@ AC_OUTPUT([doc/version.ent
            src/bin/bind10/bind10_src.py
            src/bin/bind10/run_bind10.sh
            src/bin/bind10/tests/bind10_test.py
-           src/bin/bind10/tests/sockcreator_test.py
            src/bin/bindctl/run_bindctl.sh
            src/bin/bindctl/bindctl_main.py
            src/bin/bindctl/tests/bindctl_test
@@ -924,7 +926,6 @@ AC_OUTPUT([doc/version.ent
            src/lib/python/isc/log/tests/log_console.py
            src/lib/dns/gen-rdatacode.py
            src/lib/python/bind10_config.py
-           src/lib/dns/tests/testdata/gen-wiredata.py
            src/lib/cc/session_config.h.pre
            src/lib/cc/tests/session_unittests_config.h
            src/lib/log/tests/console_test.sh
@@ -934,6 +935,7 @@ AC_OUTPUT([doc/version.ent
            src/lib/log/tests/severity_test.sh
            src/lib/log/tests/tempdir.h
            src/lib/util/python/mkpywrapper.py
+           src/lib/util/python/gen_wiredata.py
            src/lib/server_common/tests/data_path.h
            tests/system/conf.sh
            tests/system/glue/setup.sh
@@ -958,13 +960,13 @@ AC_OUTPUT([doc/version.ent
            chmod +x src/bin/msgq/run_msgq.sh
            chmod +x src/bin/msgq/tests/msgq_test
            chmod +x src/lib/dns/gen-rdatacode.py
-           chmod +x src/lib/dns/tests/testdata/gen-wiredata.py
            chmod +x src/lib/log/tests/console_test.sh
            chmod +x src/lib/log/tests/destination_test.sh
            chmod +x src/lib/log/tests/init_logger_test.sh
            chmod +x src/lib/log/tests/local_file_test.sh
            chmod +x src/lib/log/tests/severity_test.sh
            chmod +x src/lib/util/python/mkpywrapper.py
+           chmod +x src/lib/util/python/gen_wiredata.py
            chmod +x src/lib/python/isc/log/tests/log_console.py
            chmod +x tests/system/conf.sh
           ])
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index f3dbb60..6c3f332 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -742,6 +742,16 @@ Debian and Ubuntu:
         get additional debugging or diagnostic output.
       </para>
 <!-- TODO: note it doesn't go into background -->
+
+      <note>
+        <para>
+          If the setproctitle Python module is detected at start up,
+          the process names for the Python-based daemons will be renamed
+          to better identify them instead of just <quote>python</quote>.
+          This is not needed on some operating systems.
+        </para>
+      </note>
+
     </section>
 
   </chapter>
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 64136c1..e3128b5 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -56,6 +56,7 @@ EXTRA_DIST += auth_messages.mes
 
 b10_auth_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_auth_LDADD += $(top_builddir)/src/lib/util/libutil.la
 b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
 b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index ec9fa8a..1ffa687 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -63,7 +63,7 @@ datebase data source, listing the file that is being accessed.
 
 % AUTH_DNS_SERVICES_CREATED DNS services created
 This is a debug message indicating that the component that will handling
-incoming queries for the authoritiative server (DNSServices) has been
+incoming queries for the authoritative server (DNSServices) has been
 successfully created. It is issued during server startup is an indication
 that the initialization is proceeding normally.
 
@@ -74,7 +74,7 @@ reason for the failure is given in the message.) The server will drop the
 packet.
 
 % AUTH_LOAD_TSIG loading TSIG keys
-This is a debug message indicating that the authoritiative server
+This is a debug message indicating that the authoritative server
 has requested the keyring holding TSIG keys from the configuration
 database. It is issued during server startup is an indication that the
 initialization is proceeding normally.
@@ -141,8 +141,8 @@ encountered an internal error whilst processing a received packet:
 the cause of the error is included in the message.
 
 The server will return a SERVFAIL error code to the sender of the packet.
-However, this message indicates a potential error in the server.
-Please open a bug ticket for this issue.
+This message indicates a potential error in the server.  Please open a
+bug ticket for this issue.
 
 % AUTH_RECEIVED_COMMAND command '%1' received
 This is a debug message issued when the authoritative server has received
@@ -209,7 +209,7 @@ channel.  It is issued during server startup is an indication that the
 initialization is proceeding normally.
 
 % AUTH_STATS_COMMS communication error in sending statistics data: %1
-An error was encountered when the authoritiative server tried to send data
+An error was encountered when the authoritative server tried to send data
 to the statistics daemon. The message includes additional information
 describing the reason for the failure.
 
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index cf3fe4a..d51495b 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -17,6 +17,7 @@ query_bench_SOURCES += ../auth_log.h ../auth_log.cc
 nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
 
 query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+query_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
 query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 query_bench_LDADD += $(top_builddir)/src/lib/bench/libbench.la
 query_bench_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index 71520c2..5cd2f5a 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -47,6 +47,7 @@ run_unittests_LDADD += $(SQLITE_LIBS)
 run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 6a75856..9ef8c13 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -122,8 +122,8 @@ public:
         masterLoad(zone_stream, origin_, rrclass_,
                    boost::bind(&MockZoneFinder::loadRRset, this, _1));
     }
-    virtual const isc::dns::Name& getOrigin() const { return (origin_); }
-    virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+    virtual isc::dns::Name getOrigin() const { return (origin_); }
+    virtual isc::dns::RRClass getClass() const { return (rrclass_); }
     virtual FindResult find(const isc::dns::Name& name,
                             const isc::dns::RRType& type,
                             RRsetList* target = NULL,
diff --git a/src/bin/auth/tests/testdata/Makefile.am b/src/bin/auth/tests/testdata/Makefile.am
index f6f1f27..c86722f 100644
--- a/src/bin/auth/tests/testdata/Makefile.am
+++ b/src/bin/auth/tests/testdata/Makefile.am
@@ -23,4 +23,4 @@ EXTRA_DIST += example.com
 EXTRA_DIST += example.sqlite3
 
 .spec.wire:
-	$(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+	$(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 1a5ce64..6ab88d8 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,11 +1,7 @@
 SUBDIRS = . tests
 
 sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10_src.pyc bind10_messages.py bind10_messages.pyc \
-	sockcreator.pyc
-
-python_PYTHON = __init__.py sockcreator.py
-pythondir = $(pyexecdir)/bind10
+CLEANFILES = bind10 bind10_src.pyc bind10_messages.py bind10_messages.pyc
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
 pyexec_DATA = bind10_messages.py
diff --git a/src/bin/bind10/__init__.py b/src/bin/bind10/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 0337300..4debcdb 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -122,7 +122,7 @@ The boss requested a socket from the creator, but the answer is unknown. This
 looks like a programmer error.
 
 % BIND10_SOCKCREATOR_CRASHED the socket creator crashed
-The socket creator terminated unexpectadly. It is not possible to restart it
+The socket creator terminated unexpectedly. It is not possible to restart it
 (because the boss already gave up root privileges), so the system is going
 to terminate.
 
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index b7ffb94..3deba61 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -67,7 +67,7 @@ import isc.util.process
 import isc.net.parse
 import isc.log
 from bind10_messages import *
-import bind10.sockcreator
+import isc.bind10.sockcreator
 
 isc.log.init("b10-boss")
 logger = isc.log.Logger("boss")
@@ -346,8 +346,8 @@ class BoB:
 
     def start_creator(self):
         self.curproc = 'b10-sockcreator'
-        self.sockcreator = bind10.sockcreator.Creator("@@LIBEXECDIR@@:" +
-                                                      os.environ['PATH'])
+        self.sockcreator = isc.bind10.sockcreator.Creator("@@LIBEXECDIR@@:" +
+                                                          os.environ['PATH'])
 
     def stop_creator(self, kill=False):
         if self.sockcreator is None:
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index bb44ca0..b5b9721 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -20,7 +20,7 @@ export PYTHON_EXEC
 
 BIND10_PATH=@abs_top_builddir@/src/bin/bind10
 
-PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:$PATH
+PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
 export PATH
 
 PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:
diff --git a/src/bin/bind10/sockcreator.py b/src/bin/bind10/sockcreator.py
deleted file mode 100644
index 9fcc74e..0000000
--- a/src/bin/bind10/sockcreator.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import socket
-import struct
-import os
-import subprocess
-from bind10_messages import *
-from libutil_io_python import recv_fd
-
-logger = isc.log.Logger("boss")
-
-"""
-Module that comunicates with the privileged socket creator (b10-sockcreator).
-"""
-
-class CreatorError(Exception):
-    """
-    Exception for socket creator related errors.
-
-    It has two members: fatal and errno and they are just holding the values
-    passed to the __init__ function.
-    """
-
-    def __init__(self, message, fatal, errno=None):
-        """
-        Creates the exception. The message argument is the usual string.
-        The fatal one tells if the error is fatal (eg. the creator crashed)
-        and errno is the errno value returned from socket creator, if
-        applicable.
-        """
-        Exception.__init__(self, message)
-        self.fatal = fatal
-        self.errno = errno
-
-class Parser:
-    """
-    This class knows the sockcreator language. It creates commands, sends them
-    and receives the answers and parses them.
-
-    It does not start it, the communication channel must be provided.
-
-    In theory, anything here can throw a fatal CreatorError exception, but it
-    happens only in case something like the creator process crashes. Any other
-    occasions are mentioned explicitly.
-    """
-
-    def __init__(self, creator_socket):
-        """
-        Creates the parser. The creator_socket is socket to the socket creator
-        process that will be used for communication. However, the object must
-        have a read_fd() method to read the file descriptor. This slightly
-        unusual trick with modifying an object is used to easy up testing.
-
-        You can use WrappedSocket in production code to add the method to any
-        ordinary socket.
-        """
-        self.__socket = creator_socket
-        logger.info(BIND10_SOCKCREATOR_INIT)
-
-    def terminate(self):
-        """
-        Asks the creator process to terminate and waits for it to close the
-        socket. Does not return anything. Raises a CreatorError if there is
-        still data on the socket, if there is an error closing the socket,
-        or if the socket had already been closed.
-        """
-        if self.__socket is None:
-            raise CreatorError('Terminated already', True)
-        logger.info(BIND10_SOCKCREATOR_TERMINATE)
-        try:
-            self.__socket.sendall(b'T')
-            # Wait for an EOF - it will return empty data
-            eof = self.__socket.recv(1)
-            if len(eof) != 0:
-                raise CreatorError('Protocol error - data after terminated',
-                                   True)
-            self.__socket = None
-        except socket.error as se:
-            self.__socket = None
-            raise CreatorError(str(se), True)
-
-    def get_socket(self, address, port, socktype):
-        """
-        Asks the socket creator process to create a socket. Pass an address
-        (the isc.net.IPaddr object), port number and socket type (either
-        string "UDP", "TCP" or constant socket.SOCK_DGRAM or
-        socket.SOCK_STREAM.
-
-        Blocks until it is provided by the socket creator process (which
-        should be fast, as it is on localhost) and returns the file descriptor
-        number. It raises a CreatorError exception if the creation fails.
-        """
-        if self.__socket is None:
-            raise CreatorError('Socket requested on terminated creator', True)
-        # First, assemble the request from parts
-        logger.info(BIND10_SOCKET_GET, address, port, socktype)
-        data = b'S'
-        if socktype == 'UDP' or socktype == socket.SOCK_DGRAM:
-            data += b'U'
-        elif socktype == 'TCP' or socktype == socket.SOCK_STREAM:
-            data += b'T'
-        else:
-            raise ValueError('Unknown socket type: ' + str(socktype))
-        if address.family == socket.AF_INET:
-            data += b'4'
-        elif address.family == socket.AF_INET6:
-            data += b'6'
-        else:
-            raise ValueError('Unknown address family in address')
-        data += struct.pack('!H', port)
-        data += address.addr
-        try:
-            # Send the request
-            self.__socket.sendall(data)
-            answer = self.__socket.recv(1)
-            if answer == b'S':
-                # Success!
-                result = self.__socket.read_fd()
-                logger.info(BIND10_SOCKET_CREATED, result)
-                return result
-            elif answer == b'E':
-                # There was an error, read the error as well
-                error = self.__socket.recv(1)
-                errno = struct.unpack('i',
-                                      self.__read_all(len(struct.pack('i',
-                                                                      0))))
-                if error == b'S':
-                    cause = 'socket'
-                elif error == b'B':
-                    cause = 'bind'
-                else:
-                    self.__socket = None
-                    logger.fatal(BIND10_SOCKCREATOR_BAD_CAUSE, error)
-                    raise CreatorError('Unknown error cause' + str(answer), True)
-                logger.error(BIND10_SOCKET_ERROR, cause, errno[0],
-                             os.strerror(errno[0]))
-                raise CreatorError('Error creating socket on ' + cause, False,
-                                   errno[0])
-            else:
-                self.__socket = None
-                logger.fatal(BIND10_SOCKCREATOR_BAD_RESPONSE, answer)
-                raise CreatorError('Unknown response ' + str(answer), True)
-        except socket.error as se:
-            self.__socket = None
-            logger.fatal(BIND10_SOCKCREATOR_TRANSPORT_ERROR, str(se))
-            raise CreatorError(str(se), True)
-
-    def __read_all(self, length):
-        """
-        Keeps reading until length data is read or EOF or error happens.
-
-        EOF is considered error as well and throws a CreatorError.
-        """
-        result = b''
-        while len(result) < length:
-            data = self.__socket.recv(length - len(result))
-            if len(data) == 0:
-                self.__socket = None
-                logger.fatal(BIND10_SOCKCREATOR_EOF)
-                raise CreatorError('Unexpected EOF', True)
-            result += data
-        return result
-
-class WrappedSocket:
-    """
-    This class wraps a socket and adds a read_fd method, so it can be used
-    for the Parser class conveniently. It simply copies all its guts into
-    itself and implements the method.
-    """
-    def __init__(self, socket):
-        # Copy whatever can be copied from the socket
-        for name in dir(socket):
-            if name not in ['__class__', '__weakref__']:
-                setattr(self, name, getattr(socket, name))
-        # Keep the socket, so we can prevent it from being garbage-collected
-        # and closed before we are removed ourself
-        self.__orig_socket = socket
-
-    def read_fd(self):
-        """
-        Read the file descriptor from the socket.
-        """
-        return recv_fd(self.fileno())
-
-# FIXME: Any idea how to test this? Starting an external process doesn't sound
-# OK
-class Creator(Parser):
-    """
-    This starts the socket creator and allows asking for the sockets.
-    """
-    def __init__(self, path):
-        (local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
-        # Popen does not like, for some reason, having the same socket for
-        # stdin as well as stdout, so we dup it before passing it there.
-        remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
-                                socket.SOCK_STREAM)
-        env = os.environ
-        env['PATH'] = path
-        self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
-                                          stdin=remote.fileno(),
-                                          stdout=remote2.fileno())
-        remote.close()
-        remote2.close()
-        Parser.__init__(self, WrappedSocket(local))
-
-    def pid(self):
-        return self.__process.pid
-
-    def kill(self):
-        logger.warn(BIND10_SOCKCREATOR_KILL)
-        if self.__process is not None:
-            self.__process.kill()
-            self.__process = None
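
For reference, the request format the removed Parser.get_socket() builds is
simple: 'S' to request a socket, 'U'/'T' for UDP/TCP, '4'/'6' for the address
family, the port in network byte order, then the raw address bytes. A small
illustrative sketch (build_request is a hypothetical helper, not code from the
tree):

    import socket
    import struct

    def build_request(addr_bytes, family, port, socktype):
        # 'S' starts a socket request; 'U'/'T' selects UDP/TCP; '4'/'6'
        # selects the address family; then the port in network byte order
        # and the raw address bytes follow.
        data = b'S'
        data += b'U' if socktype == socket.SOCK_DGRAM else b'T'
        data += b'4' if family == socket.AF_INET else b'6'
        data += struct.pack('!H', port)
        return data + addr_bytes

    # 192.0.2.0, port 42, UDP -> b'SU4\x00\x2a\xc0\x00\x02\x00', the same
    # request the removed unit tests checked for.
    print(build_request(socket.inet_aton('192.0.2.0'), socket.AF_INET,
                        42, socket.SOCK_DGRAM))
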
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 6d758b3..d9e012f 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 #PYTESTS = args_test.py bind10_test.py
 # NOTE: this has a generated test found in the builddir
-PYTESTS = bind10_test.py sockcreator_test.py
+PYTESTS = bind10_test.py
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
diff --git a/src/bin/bind10/tests/sockcreator_test.py.in b/src/bin/bind10/tests/sockcreator_test.py.in
deleted file mode 100644
index 53e7035..0000000
--- a/src/bin/bind10/tests/sockcreator_test.py.in
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# This test file is generated .py.in -> .py just to be in the build dir,
-# same as the rest of the tests. Saves a lot of stuff in makefile.
-
-"""
-Tests for the bind10.sockcreator module.
-"""
-
-import unittest
-import struct
-import socket
-from isc.net.addr import IPAddr
-import isc.log
-from libutil_io_python import send_fd
-from bind10.sockcreator import Parser, CreatorError, WrappedSocket
-
-class FakeCreator:
-    """
-    Class emulating the socket to the socket creator. It can be given expected
-    data to receive (and check) and responses to give to the Parser class
-    during testing.
-    """
-
-    class InvalidPlan(Exception):
-        """
-        Raised when someone wants to recv when sending is planned or vice
-        versa.
-        """
-        pass
-
-    class InvalidData(Exception):
-        """
-        Raises when the data passed to sendall are not the same as expected.
-        """
-        pass
-
-    def __init__(self, plan):
-        """
-        Create the object. The plan variable contains list of expected actions,
-        in form:
-
-        [('r', 'Data to return from recv'), ('s', 'Data expected on sendall'),
-             , ('d', 'File descriptor number to return from read_sock'), ('e',
-             None), ...]
-
-        It modifies the array as it goes.
-        """
-        self.__plan = plan
-
-    def __get_plan(self, expected):
-        if len(self.__plan) == 0:
-            raise InvalidPlan('Nothing more planned')
-        (kind, data) = self.__plan[0]
-        if kind == 'e':
-            self.__plan.pop(0)
-            raise socket.error('False socket error')
-        if kind != expected:
-            raise InvalidPlan('Planned ' + kind + ', but ' + expected +
-                'requested')
-        return data
-
-    def recv(self, maxsize):
-        """
-        Emulate recv. Returs maxsize bytes from the current recv plan. If
-        there are data left from previous recv call, it is used first.
-
-        If no recv is planned, raises InvalidPlan.
-        """
-        data = self.__get_plan('r')
-        result, rest = data[:maxsize], data[maxsize:]
-        if len(rest) > 0:
-            self.__plan[0] = ('r', rest)
-        else:
-            self.__plan.pop(0)
-        return result
-
-    def read_fd(self):
-        """
-        Emulate the reading of file descriptor. Returns one from a plan.
-
-        It raises InvalidPlan if no socket is planned now.
-        """
-        fd = self.__get_plan('f')
-        self.__plan.pop(0)
-        return fd
-
-    def sendall(self, data):
-        """
-        Checks that the data passed are correct according to plan. It raises
-        InvalidData if the data differs or InvalidPlan when sendall is not
-        expected.
-        """
-        planned = self.__get_plan('s')
-        dlen = len(data)
-        prefix, rest = planned[:dlen], planned[dlen:]
-        if prefix != data:
-            raise InvalidData('Expected "' + str(prefix)+ '", got "' +
-                str(data) + '"')
-        if len(rest) > 0:
-            self.__plan[0] = ('s', rest)
-        else:
-            self.__plan.pop(0)
-
-    def all_used(self):
-        """
-        Returns if the whole plan was consumed.
-        """
-        return len(self.__plan) == 0
-
-class ParserTests(unittest.TestCase):
-    """
-    Testcases for the Parser class.
-
-    A lot of these test could be done by
-    `with self.assertRaises(CreatorError) as cm`. But some versions of python
-    take the scope wrong and don't work, so we use the primitive way of
-    try-except.
-    """
-    def __terminate(self):
-        creator = FakeCreator([('s', b'T'), ('r', b'')])
-        parser = Parser(creator)
-        self.assertEqual(None, parser.terminate())
-        self.assertTrue(creator.all_used())
-        return parser
-
-    def test_terminate(self):
-        """
-        Test if the command to terminate is correct and it waits for reading the
-        EOF.
-        """
-        self.__terminate()
-
-    def __terminate_raises(self, parser):
-        """
-        Check that terminate() raises a fatal exception.
-        """
-        try:
-            parser.terminate()
-            self.fail("Not raised")
-        except CreatorError as ce:
-            self.assertTrue(ce.fatal)
-            self.assertEqual(None, ce.errno)
-
-    def test_terminate_error1(self):
-        """
-        Test it reports an exception when there's error terminating the creator.
-        This one raises an error when receiving the EOF.
-        """
-        creator = FakeCreator([('s', b'T'), ('e', None)])
-        parser = Parser(creator)
-        self.__terminate_raises(parser)
-
-    def test_terminate_error2(self):
-        """
-        Test it reports an exception when there's error terminating the creator.
-        This one raises an error when sending data.
-        """
-        creator = FakeCreator([('e', None)])
-        parser = Parser(creator)
-        self.__terminate_raises(parser)
-
-    def test_terminate_error3(self):
-        """
-        Test it reports an exception when there's error terminating the creator.
-        This one sends data when it should have terminated.
-        """
-        creator = FakeCreator([('s', b'T'), ('r', b'Extra data')])
-        parser = Parser(creator)
-        self.__terminate_raises(parser)
-
-    def test_terminate_twice(self):
-        """
-        Test we can't terminate twice.
-        """
-        parser = self.__terminate()
-        self.__terminate_raises(parser)
-
-    def test_crash(self):
-        """
-        Tests that the parser correctly raises exception when it crashes
-        unexpectedly.
-        """
-        creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'')])
-        parser = Parser(creator)
-        try:
-            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
-            self.fail("Not raised")
-        except CreatorError as ce:
-            self.assertTrue(creator.all_used())
-            # Is the exception correct?
-            self.assertTrue(ce.fatal)
-            self.assertEqual(None, ce.errno)
-
-    def test_error(self):
-        """
-        Tests that the parser correctly raises non-fatal exception when
-        the socket can not be created.
-        """
-        # We split the int to see if it can cope with data coming in
-        # different packets
-        intpart = struct.pack('@i', 42)
-        creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'ES' +
-            intpart[:1]), ('r', intpart[1:])])
-        parser = Parser(creator)
-        try:
-            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
-            self.fail("Not raised")
-        except CreatorError as ce:
-            self.assertTrue(creator.all_used())
-            # Is the exception correct?
-            self.assertFalse(ce.fatal)
-            self.assertEqual(42, ce.errno)
-
-    def __error(self, plan):
-        creator = FakeCreator(plan)
-        parser = Parser(creator)
-        try:
-            parser.get_socket(IPAddr('0.0.0.0'), 0, socket.SOCK_DGRAM)
-            self.fail("Not raised")
-        except CreatorError as ce:
-            self.assertTrue(creator.all_used())
-            self.assertTrue(ce.fatal)
-
-    def test_error_send(self):
-        self.__error([('e', None)])
-
-    def test_error_recv(self):
-        self.__error([('s', b'SU4\0\0\0\0\0\0'), ('e', None)])
-
-    def test_error_read_fd(self):
-        self.__error([('s', b'SU4\0\0\0\0\0\0'), ('r', b'S'), ('e', None)])
-
-    def __create(self, addr, socktype, encoded):
-        creator = FakeCreator([('s', b'S' + encoded), ('r', b'S'), ('f', 42)])
-        parser = Parser(creator)
-        self.assertEqual(42, parser.get_socket(IPAddr(addr), 42, socktype))
-
-    def test_create1(self):
-        self.__create('192.0.2.0', 'UDP', b'U4\0\x2A\xC0\0\x02\0')
-
-    def test_create2(self):
-        self.__create('2001:db8::', socket.SOCK_STREAM,
-            b'T6\0\x2A\x20\x01\x0d\xb8\0\0\0\0\0\0\0\0\0\0\0\0')
-
-    def test_create_terminated(self):
-        """
-        Test we can't request sockets after it was terminated.
-        """
-        parser = self.__terminate()
-        try:
-            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
-            self.fail("Not raised")
-        except CreatorError as ce:
-            self.assertTrue(ce.fatal)
-            self.assertEqual(None, ce.errno)
-
-    def test_invalid_socktype(self):
-        """
-        Test invalid socket type is rejected
-        """
-        self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
-                          IPAddr('0.0.0.0'), 42, 'RAW')
-
-    def test_invalid_family(self):
-        """
-        Test it rejects invalid address family.
-        """
-        # Note: this produces a bad logger output, since this address
-        # can not be converted to string, so the original message with
-        # placeholders is output. This should not happen in practice, so
-        # it is harmless.
-        addr = IPAddr('0.0.0.0')
-        addr.family = 42
-        self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
-                          addr, 42, socket.SOCK_DGRAM)
-
-class WrapTests(unittest.TestCase):
-    """
-    Tests for the wrap_socket function.
-    """
-    def test_wrap(self):
-        # We construct two pairs of socket. The receiving side of one pair will
-        # be wrapped. Then we send one of the other pair through this pair and
-        # check the received one can be used as a socket
-
-        # The transport socket
-        (t1, t2) = socket.socketpair()
-        # The payload socket
-        (p1, p2) = socket.socketpair()
-
-        t2 = WrappedSocket(t2)
-
-        # Transfer the descriptor
-        send_fd(t1.fileno(), p1.fileno())
-        p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
-
-        # Now, pass some data trough the socket
-        p1.send(b'A')
-        data = p2.recv(1)
-        self.assertEqual(b'A', data)
-
-        # Test the wrapping didn't hurt the socket's usual methods
-        t1.send(b'B')
-        data = t2.recv(1)
-        self.assertEqual(b'B', data)
-        t2.send(b'C')
-        data = t1.recv(1)
-        self.assertEqual(b'C', data)
-
-if __name__ == '__main__':
-    isc.log.init("bind10") # FIXME Should this be needed?
-    isc.log.resetUnitTestRootLogger()
-    unittest.main()
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
index 55b941f..e007296 100644
--- a/src/bin/cmdctl/cmdctl_messages.mes
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -69,7 +69,7 @@ There was a keyboard interrupt signal to stop the cmdctl daemon. The
 daemon will now shut down.
 
 % CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1
-The b10-cdmctl daemon encountered an uncaught exception and
+The b10-cmdctl daemon encountered an uncaught exception and
 will now shut down. This is indicative of a programming error and
 should not happen under normal circumstances. The exception message
 is printed.
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index 8d341cb..824e8a8 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -35,6 +35,7 @@ b10_dhcp6_SOURCES = main.cc
 b10_dhcp6_SOURCES += dhcp6.h
 b10_dhcp6_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/util/libutil.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/host/Makefile.am b/src/bin/host/Makefile.am
index ec34ce7..a8f96c2 100644
--- a/src/bin/host/Makefile.am
+++ b/src/bin/host/Makefile.am
@@ -13,6 +13,7 @@ CLEANFILES = *.gcno *.gcda
 bin_PROGRAMS = b10-host
 b10_host_SOURCES = host.cc
 b10_host_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+b10_host_LDADD += $(top_builddir)/src/lib/util/libutil.la
 b10_host_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
 man_MANS = b10-host.1
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
index b44115a..7930c52 100644
--- a/src/bin/resolver/resolver_messages.mes
+++ b/src/bin/resolver/resolver_messages.mes
@@ -78,7 +78,7 @@ specified, it will appear once for each address.
 % RESOLVER_FORWARD_QUERY processing forward query
 This is a debug message indicating that a query received by the resolver
 has passed a set of checks (message is well-formed, it is allowed by the
-ACL, it is a supported opcode etc.) and is being forwarded to upstream
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
 servers.
 
 % RESOLVER_HEADER_ERROR message received, exception when processing header: %1
@@ -116,7 +116,7 @@ so is returning a REFUSED response to the sender.
 % RESOLVER_NORMAL_QUERY processing normal query
 This is a debug message indicating that the query received by the resolver
 has passed a set of checks (message is well-formed, it is allowed by the
-ACL, it is a supported opcode etc.) and is being processed the resolver.
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
 
 % RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
 The resolver has received a NOTIFY message.  As the server is not
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index d0f7e2c..0e984dc 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -49,14 +49,14 @@ An unknown command has been sent to the stats-httpd module. The
 stats-httpd module will respond with an error, and the command will
 be ignored.
 
-% STATHTTPD_SERVER_ERROR http server error: %1
-An internal error occurred while handling an http request. A HTTP 500
+% STATHTTPD_SERVER_ERROR HTTP server error: %1
+An internal error occurred while handling an HTTP request. An HTTP 500
 response will be sent back, and the specific error is printed. This
 is an error condition that likely points to a module that is not
 responding correctly to statistic requests.
 
-% STATHTTPD_SERVER_INIT_ERROR http server initialization error: %1
-There was a problem initializing the http server in the stats-httpd
+% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
 module upon receiving its configuration data. The most likely cause
 is a port binding problem or a bad configuration value. The specific
 error is printed in the message. The new configuration is ignored,
@@ -65,8 +65,8 @@ and an error is sent back.
 % STATHTTPD_SHUTDOWN shutting down
 The stats-httpd daemon is shutting down.
 
-% STATHTTPD_START_SERVER_INIT_ERROR http server initialization error: %1
-There was a problem initializing the http server in the stats-httpd
+% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
 module upon startup. The most likely cause is that it was not able
 to bind to the listening port. The specific error is printed, and the
 module will shut down.
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 3d3f3dc..0dc5021 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -1,5 +1,6 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = process_rename_test.py
+noinst_SCRIPTS = $(PYTESTS)
 # .py will be generated by configure, so we don't have to include it
 # in EXTRA_DIST.
 
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 4b45210..f96c023 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -38,8 +38,10 @@ class TestRename(unittest.TestCase):
         Then scan them by looking at the source text
         (without actually running them)
         """
-        # Regexp to find all the *_SCRIPTS = something lines,
-        # including line continuations (backslash and newline)
+        # Regexp to find all the *_SCRIPTS = something lines (except for
+        # noinst_SCRIPTS, which are scripts for tests), including line
+        # continuations (backslash and newline)
+        excluded_lines = re.compile(r'^(noinst_SCRIPTS.*$)', re.MULTILINE)
         lines = re.compile(r'^\w+_SCRIPTS\s*=\s*((.|\\\n)*)$',
             re.MULTILINE)
         # Script name regular expression
@@ -53,7 +55,8 @@ class TestRename(unittest.TestCase):
             if 'Makefile' in fs:
                 makefile = ''.join(open(os.path.join(d,
                     "Makefile")).readlines())
-                for (var, _) in lines.findall(makefile):
+                for (var, _) in lines.findall(re.sub(excluded_lines, '',
+                                                     makefile)):
                     for (script, _) in scripts.findall(var):
                         self.__scan(d, script, fun)
 
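
A minimal illustration of the exclusion added above (the Makefile content here
is made up for the example): noinst_SCRIPTS lines, which now list test scripts,
are stripped before the *_SCRIPTS assignments are collected.

    import re

    makefile = "noinst_SCRIPTS = some_test.py\npkglibexec_SCRIPTS = b10-zonemgr\n"
    excluded_lines = re.compile(r'^(noinst_SCRIPTS.*$)', re.MULTILINE)
    lines = re.compile(r'^\w+_SCRIPTS\s*=\s*((.|\\\n)*)$', re.MULTILINE)
    for (var, _) in lines.findall(re.sub(excluded_lines, '', makefile)):
        print(var)  # prints only 'b10-zonemgr'
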
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 19b104e..121b2ad 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -48,7 +48,7 @@ There was a problem reading from the command and control channel. The
 most likely cause is that the msgq daemon is not running.
 
 % XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
-There was a problem reading a response from antoher module over the
+There was a problem reading a response from another module over the
 command and control channel. The most likely cause is that the
 configuration manager b10-cfgmgr is not running.
 
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 8ab5f7a..34e6622 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -6,11 +6,13 @@ pkglibexec_SCRIPTS = b10-zonemgr
 
 b10_zonemgrdir = $(pkgdatadir)
 b10_zonemgr_DATA = zonemgr.spec
+pyexec_DATA = zonemgr_messages.py
 
-CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+CLEANFILES  = b10-zonemgr zonemgr.pyc zonemgr.spec
+CLEANFILES += zonemgr_messages.py zonemgr_messages.pyc
 
 man_MANS = b10-zonemgr.8
-EXTRA_DIST = $(man_MANS) b10-zonemgr.xml
+EXTRA_DIST = $(man_MANS) b10-zonemgr.xml zonemgr_messages.mes
 
 if ENABLE_MAN
 
@@ -19,6 +21,10 @@ b10-zonemgr.8: b10-zonemgr.xml
 
 endif
 
+# Build logging source file from message files
+zonemgr_messages.py: zonemgr_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/zonemgr/zonemgr_messages.mes
+
 zonemgr.spec: zonemgr.spec.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.spec.pre >$@
 
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index 845190b..87a0092 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -37,10 +37,16 @@ from isc.datasrc import sqlite3_ds
 from optparse import OptionParser, OptionValueError
 from isc.config.ccsession import *
 import isc.util.process
+from zonemgr_messages import *
 
 # Initialize logging for called modules.
-# TODO: Log messages properly
 isc.log.init("b10-zonemgr")
+logger = isc.log.Logger("zonemgr")
+
+# Constants for debug levels, to be removed when we have #1074.
+DBG_START_SHUT = 0
+DBG_ZONEMGR_COMMAND = 10
+DBG_ZONEMGR_BASIC = 40
 
 isc.util.process.rename()
 
@@ -81,13 +87,6 @@ REFRESH_OFFSET = 3
 RETRY_OFFSET = 4
 EXPIRED_OFFSET = 5
 
-# verbose mode
-VERBOSE_MODE = False
-
-def log_msg(msg):
-    if VERBOSE_MODE:
-        sys.stdout.write("[b10-zonemgr] %s\n" % str(msg))
-
 class ZonemgrException(Exception):
     pass
 
@@ -97,7 +96,6 @@ class ZonemgrRefresh:
     do zone refresh.
     Zone timers can be started by calling run_timer(), and it
     can be stopped by calling shutdown() in another thread.
-
     """
 
     def __init__(self, cc, db_file, slave_socket, config_data):
@@ -161,6 +159,7 @@ class ZonemgrRefresh:
     def zone_refresh_success(self, zone_name_class):
         """Update zone info after zone refresh success"""
         if (self._zone_not_exist(zone_name_class)):
+            logger.error(ZONEMGR_UNKNOWN_ZONE_SUCCESS, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
                                    "belong to zonemgr" % zone_name_class)
         self.zonemgr_reload_zone(zone_name_class)
@@ -171,6 +170,7 @@ class ZonemgrRefresh:
     def zone_refresh_fail(self, zone_name_class):
         """Update zone info after zone refresh fail"""
         if (self._zone_not_exist(zone_name_class)):
+            logger.error(ZONEMGR_UNKNOWN_ZONE_FAIL, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
                                    "belong to zonemgr" % zone_name_class)
         # Is zone expired?
@@ -183,6 +183,7 @@ class ZonemgrRefresh:
     def zone_handle_notify(self, zone_name_class, master):
         """Handle zone notify"""
         if (self._zone_not_exist(zone_name_class)):
+            logger.error(ZONEMGR_UNKNOWN_ZONE_NOTIFIED, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] Notified zone (%s, %s) "
                                    "doesn't belong to zonemgr" % zone_name_class)
         self._set_zone_notifier_master(zone_name_class, master)
@@ -195,10 +196,12 @@ class ZonemgrRefresh:
 
     def zonemgr_add_zone(self, zone_name_class):
         """ Add a zone into zone manager."""
-        log_msg("Loading zone (%s, %s)" % zone_name_class)
+
+        logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_LOAD_ZONE, zone_name_class[0], zone_name_class[1])
         zone_info = {}
         zone_soa = sqlite3_ds.get_zone_soa(str(zone_name_class[0]), self._db_file)
         if not zone_soa:
+            logger.error(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] zone (%s, %s) doesn't have soa." % zone_name_class)
         zone_info["zone_soa_rdata"] = zone_soa[7]
         zone_info["zone_state"] = ZONE_OK
@@ -269,7 +272,7 @@ class ZonemgrRefresh:
             except isc.cc.session.SessionTimeout:
                 pass        # for now we just ignore the failure
         except socket.error:
-            sys.stderr.write("[b10-zonemgr] Failed to send to module %s, the session has been closed." % module_name)
+            logger.error(ZONEMGR_SEND_FAIL, module_name)
 
     def _find_need_do_refresh_zone(self):
         """Find the first zone need do refresh, if no zone need
@@ -278,7 +281,8 @@ class ZonemgrRefresh:
         zone_need_refresh = None
         for zone_name_class in self._zonemgr_refresh_info.keys():
             zone_state = self._get_zone_state(zone_name_class)
-            # If hasn't received refresh response but are within refresh timeout, skip the zone
+            # If hasn't received refresh response but are within refresh
+            # timeout, skip the zone
             if (ZONE_REFRESHING == zone_state and
                 (self._get_zone_refresh_timeout(zone_name_class) > self._get_current_time())):
                 continue
@@ -298,7 +302,7 @@ class ZonemgrRefresh:
 
     def _do_refresh(self, zone_name_class):
         """Do zone refresh."""
-        log_msg("Do refresh for zone (%s, %s)." % zone_name_class)
+        logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_REFRESH_ZONE, zone_name_class[0], zone_name_class[1])
         self._set_zone_state(zone_name_class, ZONE_REFRESHING)
         self._set_zone_refresh_timeout(zone_name_class, self._get_current_time() + self._max_transfer_timeout)
         notify_master = self._get_zone_notifier_master(zone_name_class)
@@ -355,7 +359,7 @@ class ZonemgrRefresh:
                 if e.args[0] == errno.EINTR:
                     (rlist, wlist, xlist) = ([], [], [])
                 else:
-                    sys.stderr.write("[b10-zonemgr] Error with select(); %s\n" % e)
+                    logger.error(ZONEMGR_SELECT_ERROR, e);
                     break
 
             for fd in rlist:
@@ -369,12 +373,14 @@ class ZonemgrRefresh:
 
     def run_timer(self, daemon=False):
         """
-        Keep track of zone timers. Spawns and starts a thread. The thread object is returned.
+        Keep track of zone timers. Spawns and starts a thread. The thread object
+        is returned.
 
         You can stop it by calling shutdown().
         """
         # Small sanity check
         if self._running:
+            logger.error(ZONEMGR_TIMER_THREAD_RUNNING)
             raise RuntimeError("Trying to run the timers twice at the same time")
 
         # Prepare the launch
@@ -399,6 +405,7 @@ class ZonemgrRefresh:
         called from a different thread.
         """
         if not self._running:
+            logger.error(ZONEMGR_NO_TIMER_THREAD)
             raise RuntimeError("Trying to shutdown, but not running")
 
         # Ask the thread to stop
@@ -519,8 +526,8 @@ class Zonemgr:
         return db_file
 
     def shutdown(self):
-        """Shutdown the zonemgr process. the thread which is keeping track of zone
-        timers should be terminated.
+        """Shutdown the zonemgr process. The thread which is keeping track of
+           zone timers should be terminated.
         """
         self._zone_refresh.shutdown()
 
@@ -560,17 +567,17 @@ class Zonemgr:
         # jitter should not be bigger than half of the original value
         if config_data.get('refresh_jitter') > 0.5:
             config_data['refresh_jitter'] = 0.5
-            log_msg("[b10-zonemgr] refresh_jitter is too big, its value will "
-                      "be set to 0.5")
-
+            logger.warn(ZONEMGR_JITTER_TOO_BIG)
 
     def _parse_cmd_params(self, args, command):
         zone_name = args.get("zone_name")
         if not zone_name:
+            logger.error(ZONEMGR_NO_ZONE_NAME)
             raise ZonemgrException("zone name should be provided")
 
         zone_class = args.get("zone_class")
         if not zone_class:
+            logger.error(ZONEMGR_NO_ZONE_CLASS)
             raise ZonemgrException("zone class should be provided")
 
         if (command != ZONE_NOTIFY_COMMAND):
@@ -578,6 +585,7 @@ class Zonemgr:
 
         master_str = args.get("master")
         if not master_str:
+            logger.error(ZONEMGR_NO_MASTER_ADDRESS)
             raise ZonemgrException("master address should be provided")
 
         return ((zone_name, zone_class), master_str)
@@ -585,15 +593,16 @@ class Zonemgr:
 
     def command_handler(self, command, args):
         """Handle command receivd from command channel.
-        ZONE_NOTIFY_COMMAND is issued by Auth process; ZONE_XFRIN_SUCCESS_COMMAND
-        and ZONE_XFRIN_FAILED_COMMAND are issued by Xfrin process; shutdown is issued
-        by a user or Boss process. """
+        ZONE_NOTIFY_COMMAND is issued by Auth process;
+        ZONE_XFRIN_SUCCESS_COMMAND and ZONE_XFRIN_FAILED_COMMAND are issued by
+        Xfrin process;
+        shutdown is issued by a user or Boss process. """
         answer = create_answer(0)
         if command == ZONE_NOTIFY_COMMAND:
             """ Handle Auth notify command"""
             # master is the source sender of the notify message.
             zone_name_class, master = self._parse_cmd_params(args, command)
-            log_msg("Received notify command for zone (%s, %s)." % zone_name_class)
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_NOTIFY, zone_name_class[0], zone_name_class[1])
             with self._lock:
                 self._zone_refresh.zone_handle_notify(zone_name_class, master)
             # Send notification to zonemgr timer thread
@@ -602,6 +611,7 @@ class Zonemgr:
         elif command == ZONE_XFRIN_SUCCESS_COMMAND:
             """ Handle xfrin success command"""
             zone_name_class = self._parse_cmd_params(args, command)
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_SUCCESS, zone_name_class[0], zone_name_class[1])
             with self._lock:
                 self._zone_refresh.zone_refresh_success(zone_name_class)
             self._master_socket.send(b" ")# make self._slave_socket readble
@@ -609,14 +619,17 @@ class Zonemgr:
         elif command == ZONE_XFRIN_FAILED_COMMAND:
             """ Handle xfrin fail command"""
             zone_name_class = self._parse_cmd_params(args, command)
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_FAILED, zone_name_class[0], zone_name_class[1])
             with self._lock:
                 self._zone_refresh.zone_refresh_fail(zone_name_class)
             self._master_socket.send(b" ")# make self._slave_socket readble
 
         elif command == "shutdown":
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_SHUTDOWN)
             self.shutdown()
 
         else:
+            logger.warn(ZONEMGR_RECEIVE_UNKNOWN, str(command))
             answer = create_answer(1, "Unknown command:" + str(command))
 
         return answer
@@ -643,25 +656,29 @@ def set_cmd_options(parser):
 
 if '__main__' == __name__:
     try:
+        logger.debug(DBG_START_SHUT, ZONEMGR_STARTING)
         parser = OptionParser()
         set_cmd_options(parser)
         (options, args) = parser.parse_args()
-        VERBOSE_MODE = options.verbose
+        if options.verbose:
+            logger.set_severity("DEBUG", 99)
 
         set_signal_handler()
         zonemgrd = Zonemgr()
         zonemgrd.run()
     except KeyboardInterrupt:
-        sys.stderr.write("[b10-zonemgr] exit zonemgr process\n")
+        logger.info(ZONEMGR_KEYBOARD_INTERRUPT)
+
     except isc.cc.session.SessionError as e:
-        sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
-                           "is the command channel daemon running?\n")
+        logger.error(ZONEMGR_SESSION_ERROR)
+
     except isc.cc.session.SessionTimeout as e:
-        sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
-                           "is the configuration manager running?\n")
+        logger.error(ZONEMGR_SESSION_TIMEOUT)
+
     except isc.config.ModuleCCSessionError as e:
-        sys.stderr.write("[b10-zonemgr] exit zonemgr process: %s\n" % str(e))
+        logger.error(ZONEMGR_CCSESSION_ERROR, str(e))
 
     if zonemgrd and zonemgrd.running:
         zonemgrd.shutdown()
 
+    logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
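
To summarize the logging change above: b10-zonemgr now reports through a
per-module logger and the constants generated from zonemgr_messages.mes instead
of writing to stdout/stderr. A rough usage sketch, assuming the BIND 10 Python
modules and the generated zonemgr_messages.py are importable:

    import isc.log
    from zonemgr_messages import ZONEMGR_RECEIVE_NOTIFY

    isc.log.init("b10-zonemgr")
    logger = isc.log.Logger("zonemgr")
    logger.set_severity("DEBUG", 99)  # what the -v option now does
    DBG_ZONEMGR_COMMAND = 10          # same value the patch defines
    logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_NOTIFY,
                 "example.org", "IN")
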
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
new file mode 100644
index 0000000..8abec5d
--- /dev/null
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -0,0 +1,145 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the zonemgr messages python module.
+
+% ZONEMGR_CCSESSION_ERROR command channel session error: %1
+An error was encountered on the command channel.  The message indicates
+the nature of the error.
+
+% ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5
+The value specified in the configuration for the refresh jitter is too large,
+so it has been set to the maximum of 0.5.
+
+% ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+
+% ZONEMGR_LOAD_ZONE loading zone %1 (class %2)
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+
+% ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received.  This may be due to an internal programming error; please
+submit a bug report.
+
+% ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record.  The load has
+been abandoned.
+
+% ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running.  This may indicate an
+internal program error.  Please submit a bug report.
+
+% ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel.  The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+
+% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+
+% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel.  The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error.  Please submit a bug report.
+
+% ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel.  The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+
+% ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel.  The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+
+% ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+
+% ZONEMGR_SELECT_ERROR error with select(): %1
+An attempt to wait for input from a socket failed.  The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+
+% ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed.  The session between the modules has been closed.
+
+% ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon
+The zonemgr process could not be started because it was unable to
+connect to the command channel daemon.  The most likely cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon
+The zonemgr process could not be started because it timed out when
+connecting to the command channel daemon.  The most likely cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SHUTDOWN zone manager has shut down
+A debug message, output when the zone manager has shut down completely.
+
+% ZONEMGR_STARTING zone manager starting
+A debug message output when the zone manager starts up.
+
+% ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running.  It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer.  Please submit
+a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager.  This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case).  Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager.  This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case).  Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager.  This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
index 3e11ede..feb75d4 100644
--- a/src/lib/asiodns/asiodns_messages.mes
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -26,13 +26,13 @@ enabled.
 % ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
 The asynchronous I/O code encountered an error when trying to open a socket
 of the specified protocol in order to send a message to the target address.
-The number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
 message.
 
 % ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
 The asynchronous I/O code encountered an error when trying to read data from
 the specified address on the given protocol.  The number of the system
-error that cause the problem is given in the message.
+error that caused the problem is given in the message.
 
 % ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
 An upstream fetch from the specified address timed out.  This may happen for
@@ -41,9 +41,9 @@ or a problem on the network.  The message will only appear if debug is
 enabled.
 
 % ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
 
 % ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
 An internal consistency check on the origin of a message from the
diff --git a/src/lib/bench/tests/Makefile.am b/src/lib/bench/tests/Makefile.am
index 3ebdf29..3f8a678 100644
--- a/src/lib/bench/tests/Makefile.am
+++ b/src/lib/bench/tests/Makefile.am
@@ -16,6 +16,7 @@ run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 run_unittests_LDADD  = $(top_builddir)/src/lib/bench/libbench.la
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(GTEST_LDADD)
diff --git a/src/lib/cache/tests/Makefile.am b/src/lib/cache/tests/Makefile.am
index f9237af..a215c56 100644
--- a/src/lib/cache/tests/Makefile.am
+++ b/src/lib/cache/tests/Makefile.am
@@ -56,6 +56,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
index 8c62ea1..8370cdd 100644
--- a/src/lib/cc/cc_messages.mes
+++ b/src/lib/cc/cc_messages.mes
@@ -53,11 +53,11 @@ Debug message, we're about to send a message over the command channel.
 This happens when garbage comes over the command channel or some kind of
 confusion happens in the program. The data received from the socket makes no
 sense if we interpret it as message lengths. The first one is the total length
-of message, the second length of the header. The header and it's length
-(2 bytes) is counted in the total length.
+of the message; the second is the length of the header. The header
+and its length (2 bytes) are counted in the total length.
 
 % CC_LENGTH_NOT_READY length not ready
-There should be data representing length of message on the socket, but it
+There should be data representing the length of the message on the socket, but it
 is not there.
 
 % CC_NO_MESSAGE no message ready to be received yet
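
For orientation, the two lengths CC_INVALID_LENGTHS talks about come from the
framing of messages on the channel: a total length, a 2-byte header length,
then the header and body, with the header and its length field counted in the
total. A rough sketch of such framing; the 4-byte, network-byte-order size
field is an assumption of this example, not something stated here:

    import struct

    def frame(header, body):
        # The total length covers the 2-byte header-length field, the
        # header itself and the body that follows it.
        total = 2 + len(header) + len(body)
        return struct.pack('!IH', total, len(header)) + header + body
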
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 97d5cf1..e0e24cf 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -119,7 +119,7 @@ private:
 void
 SessionImpl::establish(const char& socket_file) {
     try {
-        LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(socket_file);
+        LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(&socket_file);
         socket_.connect(asio::local::stream_protocol::endpoint(&socket_file),
                         error_);
         LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISHED);
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 261baae..db67781 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -22,6 +22,8 @@ libdatasrc_la_SOURCES += zone.h
 libdatasrc_la_SOURCES += result.h
 libdatasrc_la_SOURCES += logger.h logger.cc
 libdatasrc_la_SOURCES += client.h
+libdatasrc_la_SOURCES += database.h database.cc
+libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
 nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
 libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index a830f00..9fe6519 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,6 +15,8 @@
 #ifndef __DATA_SOURCE_CLIENT_H
 #define __DATA_SOURCE_CLIENT_H 1
 
+#include <boost/noncopyable.hpp>
+
 #include <datasrc/zone.h>
 
 namespace isc {
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
new file mode 100644
index 0000000..0e1418d
--- /dev/null
+++ b/src/lib/datasrc/database.cc
@@ -0,0 +1,87 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+#include <dns/name.h>
+
+using isc::dns::Name;
+
+namespace isc {
+namespace datasrc {
+
+DatabaseClient::DatabaseClient(boost::shared_ptr<DatabaseAccessor>
+                               database) :
+    database_(database)
+{
+    if (database_.get() == NULL) {
+        isc_throw(isc::InvalidParameter,
+                  "No database provided to DatabaseClient");
+    }
+}
+
+DataSourceClient::FindResult
+DatabaseClient::findZone(const Name& name) const {
+    std::pair<bool, int> zone(database_->getZone(name));
+    // Try exact first
+    if (zone.first) {
+        return (FindResult(result::SUCCESS,
+                           ZoneFinderPtr(new Finder(database_,
+                                                    zone.second))));
+    }
+    // Then try superdomains
+    // Start from 1, as 0 is covered above
+    for (size_t i(1); i < name.getLabelCount(); ++i) {
+        zone = database_->getZone(name.split(i));
+        if (zone.first) {
+            return (FindResult(result::PARTIALMATCH,
+                               ZoneFinderPtr(new Finder(database_,
+                                                        zone.second))));
+        }
+    }
+    // No, really nothing
+    return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+}
+
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor>
+                               database, int zone_id) :
+    database_(database),
+    zone_id_(zone_id)
+{ }
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name&,
+                             const isc::dns::RRType&,
+                             isc::dns::RRsetList*,
+                             const FindOptions) const
+{
+    // TODO Implement
+    return (FindResult(SUCCESS, isc::dns::ConstRRsetPtr()));
+}
+
+Name
+DatabaseClient::Finder::getOrigin() const {
+    // TODO Implement
+    return (Name("."));
+}
+
+isc::dns::RRClass
+DatabaseClient::Finder::getClass() const {
+    // TODO Implement
+    return isc::dns::RRClass::IN();
+}
+
+}
+}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
new file mode 100644
index 0000000..1f6bd22
--- /dev/null
+++ b/src/lib/datasrc/database.h
@@ -0,0 +1,187 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATABASE_DATASRC_H
+#define __DATABASE_DATASRC_H
+
+#include <datasrc/client.h>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Abstraction of a low-level database with DNS data
+ *
+ * This class defines the interface to databases. Each supported database
+ * will provide methods for accessing the data stored there in a generic
+ * manner. The methods are meant to be low-level, with little or no knowledge
+ * of DNS, and it should be possible to translate them directly to queries.
+ *
+ * How the communication with the database is done, and with what schema
+ * (in the case of a relational/SQL database), is up to the concrete classes.
+ *
+ * This class is non-copyable, as copying connections to a database makes
+ * little sense and will not be needed.
+ *
+ * \todo Is it true this does not need to be copied? For example the zone
+ *     iterator might need its own copy. But a virtual clone() method might
+ *     be better for that than a copy constructor.
+ *
+ * \note The same application may create multiple connections to the same
+ *     database, having multiple instances of this class. If the database
+ *     allows multiple open queries on one connection, the accessor class
+ *     may share the connection.
+ */
+class DatabaseAccessor : boost::noncopyable {
+public:
+    /**
+     * \brief Destructor
+     *
+     * It is empty, but needs a virtual one, since we will use the derived
+     * classes in polymorphic way.
+     */
+    virtual ~DatabaseAccessor() { }
+    /**
+     * \brief Retrieve a zone identifier
+     *
+     * This method looks up a zone for the given name in the database. It
+     * should match only the exact zone name (e.g. the name is equal to the
+     * zone's apex), as the DatabaseClient will loop through the labels itself
+     * and find the most suitable zone.
+     *
+     * It is not specified if or what an implementation of this method may
+     * throw, so calling code should expect anything.
+     *
+     * \param name The name of the zone's apex to be looked up.
+     * \return The first part of the result indicates whether a matching zone
+     *     was found. If it was, the second part is the internal zone ID,
+     *     which will be passed to methods that find data in the zone.
+     *     Implementations are not required to keep real IDs around; whatever
+     *     is returned is only passed back to the database as an opaque
+     *     handle.
+     */
+    virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const = 0;
+};
+
+/**
+ * \brief Concrete data source client oriented at database backends.
+ *
+ * This class (together with corresponding versions of ZoneFinder,
+ * ZoneIterator, etc.) translates high-level data source queries to
+ * low-level calls on DatabaseAccessor. It issues multiple queries
+ * if necessary and validates data from the database, allowing the
+ * DatabaseAccessor to be just a simple translation to SQL or other
+ * queries to the database.
+ *
+ * While it is possible to subclass it for a specific database in case
+ * of special needs, that is not expected to be necessary. It should just
+ * work as it is with any DatabaseAccessor.
+ */
+class DatabaseClient : public DataSourceClient {
+public:
+    /**
+     * \brief Constructor
+     *
+     * It initializes the client with a database.
+     *
+     * \exception isc::InvalidParameter if database is NULL. It might throw
+     * a standard allocation exception as well, but doesn't throw anything else.
+     *
+     * \param database The database to use to get data. As the parameter
+     *     suggests, the client takes ownership of the database and will
+     *     delete it when it is itself deleted.
+     */
+    DatabaseClient(boost::shared_ptr<DatabaseAccessor> database);
+    /**
+     * \brief Corresponding ZoneFinder implementation
+     *
+     * The zone finder implementation for database data sources. Similarly
+     * to the DatabaseClient, it translates the queries to methods of the
+     * database.
+     *
+     * Applications should not come into direct contact with this class
+     * (they should handle it through a generic ZoneFinder pointer), so
+     * it could be completely hidden in the .cc file. But it is provided
+     * here to allow testing, and for rare cases when a database needs
+     * slightly different handling it can be subclassed.
+     *
+     * The methods correspond directly to the ones in ZoneFinder.
+     */
+    class Finder : public ZoneFinder {
+    public:
+        /**
+         * \brief Constructor
+         *
+         * \param database The database (shared with DatabaseClient) to
+         *     be used for queries (the one asked for ID before).
+         * \param zone_id The zone ID which was returned from
+         *     DatabaseAccessor::getZone and which will be passed to further
+         *     calls to the database.
+         */
+        Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id);
+        // The following three methods are just implementations of inherited
+        // ZoneFinder's pure virtual methods.
+        virtual isc::dns::Name getOrigin() const;
+        virtual isc::dns::RRClass getClass() const;
+        virtual FindResult find(const isc::dns::Name& name,
+                                const isc::dns::RRType& type,
+                                isc::dns::RRsetList* target = NULL,
+                                const FindOptions options = FIND_DEFAULT)
+            const;
+        /**
+         * \brief The zone ID
+         *
+         * This function provides the stored zone ID as passed to the
+         * constructor. This is meant for testing purposes and normal
+         * applications shouldn't need it.
+         */
+        int zone_id() const { return (zone_id_); }
+        /**
+         * \brief The database.
+         *
+         * This function provides the database stored inside as
+         * passed to the constructor. This is meant for testing purposes and
+         * normal applications shouldn't need it.
+         */
+        const DatabaseAccessor& database() const {
+            return (*database_);
+        }
+    private:
+        boost::shared_ptr<DatabaseAccessor> database_;
+        const int zone_id_;
+    };
+    /**
+     * \brief Find a zone in the database
+     *
+     * This queries the database's getZone to find the best matching zone.
+     * It will propagate whatever exceptions are thrown from that method
+     * (which is not restricted in any way).
+     *
+     * \param name Name of the zone, or of data contained in it.
+     * \return A FindResult containing the result code and an instance of
+     *     Finder, if anything is found. However, applications should not
+     *     rely on the ZoneFinder being an instance of Finder (a subclass of
+     *     this class may return something else, and this may change in
+     *     future versions); they should use it as a ZoneFinder only.
+     */
+    virtual FindResult findZone(const isc::dns::Name& name) const;
+private:
+    /// \brief Our database.
+    const boost::shared_ptr<DatabaseAccessor> database_;
+};
+
+}
+}
+
+#endif
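
As a quick orientation to the interfaces added in database.h above, the
following sketch shows how a DatabaseAccessor and a DatabaseClient are meant
to fit together, assuming the usual BIND 10 headers and build environment;
the StaticAccessor class and its single hard-coded zone are illustrative
only and not part of the patch:

    // Sketch only: a trivial DatabaseAccessor plugged into a DatabaseClient.
    #include <datasrc/database.h>
    #include <dns/name.h>
    #include <boost/shared_ptr.hpp>
    #include <iostream>
    #include <utility>

    using namespace isc::datasrc;

    // Hypothetical accessor that knows exactly one zone, with internal ID 1.
    class StaticAccessor : public DatabaseAccessor {
    public:
        virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const {
            if (name == isc::dns::Name("example.org")) {
                return (std::pair<bool, int>(true, 1));
            }
            return (std::pair<bool, int>(false, 0));
        }
    };

    int main() {
        DatabaseClient client(boost::shared_ptr<DatabaseAccessor>(
                                  new StaticAccessor));
        // findZone() walks up the name: a name inside the zone yields
        // PARTIALMATCH and a generic ZoneFinder for the enclosing zone.
        const DataSourceClient::FindResult result(
            client.findZone(isc::dns::Name("www.example.org")));
        if (result.code == result::PARTIALMATCH && result.zone_finder) {
            std::cout << "found an enclosing zone" << std::endl;
        }
        return (0);
    }
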
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 3dc69e0..1a911c0 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -400,12 +400,22 @@ enough information for it.  The code is 1 for error, 2 for not implemented.
 
 % DATASRC_SQLITE_CLOSE closing SQLite database
 Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CONNCLOSE closing SQLite database
+Debug information. The database file is no longer needed and is being
+closed.
+
+% DATASRC_SQLITE_CONNOPEN opening SQLite database file '%1'
+Debug information. The database file is being opened so it can start
+providing data.
+
 % DATASRC_SQLITE_CREATE SQLite data source created
 Debug information. An instance of SQLite data source is being created.
 
 % DATASRC_SQLITE_DESTROY SQLite data source destroyed
 Debug information. An instance of SQLite data source is being destroyed.
 
+% DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized
+Debug information. The object wrapping the database connection is being
+destroyed.
+
 % DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
 Debug information. The SQLite data source is trying to identify which zone
 should hold this domain.
@@ -458,6 +468,9 @@ source.
 The SQLite data source was asked to provide a NSEC3 record for given zone.
 But it doesn't contain that zone.
 
+% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
+Debug information. The object wrapping the database connection is being
+initialized.
+
 % DATASRC_SQLITE_OPEN opening SQLite database '%1'
 Debug information. The SQLite data source is loading an SQLite database in
 the provided file.
@@ -496,4 +509,3 @@ data source.
 % DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
 This indicates a programming error. An internal task of unknown type was
 generated.
-
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 3d24ce0..26223da 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -606,12 +606,12 @@ InMemoryZoneFinder::~InMemoryZoneFinder() {
     delete impl_;
 }
 
-const Name&
+Name
 InMemoryZoneFinder::getOrigin() const {
     return (impl_->origin_);
 }
 
-const RRClass&
+RRClass
 InMemoryZoneFinder::getClass() const {
     return (impl_->zone_class_);
 }
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 9bed960..9707797 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -58,10 +58,10 @@ public:
     //@}
 
     /// \brief Returns the origin of the zone.
-    virtual const isc::dns::Name& getOrigin() const;
+    virtual isc::dns::Name getOrigin() const;
 
     /// \brief Returns the class of the zone.
-    virtual const isc::dns::RRClass& getClass() const;
+    virtual isc::dns::RRClass getClass() const;
 
     /// \brief Looks up an RRset in the zone.
     ///
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
new file mode 100644
index 0000000..352768d
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -0,0 +1,322 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sqlite3.h>
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/logger.h>
+#include <datasrc/data_source.h>
+
+namespace isc {
+namespace datasrc {
+
+struct SQLite3Parameters {
+    SQLite3Parameters() :
+        db_(NULL), version_(-1),
+        q_zone_(NULL) /*, q_record_(NULL), q_addrs_(NULL), q_referral_(NULL),
+        q_any_(NULL), q_count_(NULL), q_previous_(NULL), q_nsec3_(NULL),
+        q_prevnsec3_(NULL) */
+    {}
+    sqlite3* db_;
+    int version_;
+    sqlite3_stmt* q_zone_;
+    /*
+    TODO: Yet unneeded statements
+    sqlite3_stmt* q_record_;
+    sqlite3_stmt* q_addrs_;
+    sqlite3_stmt* q_referral_;
+    sqlite3_stmt* q_any_;
+    sqlite3_stmt* q_count_;
+    sqlite3_stmt* q_previous_;
+    sqlite3_stmt* q_nsec3_;
+    sqlite3_stmt* q_prevnsec3_;
+    */
+};
+
+SQLite3Database::SQLite3Database(const std::string& filename,
+                                     const isc::dns::RRClass& rrclass) :
+    dbparameters_(new SQLite3Parameters),
+    class_(rrclass.toText())
+{
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+    open(filename);
+}
+
+namespace {
+
+// This is a helper class to initialize an SQLite3 DB safely.  An object of
+// this class encapsulates all temporary resources that are necessary for
+// the initialization, and releases them in the destructor.  Once everything
+// is properly initialized, the move() method moves the allocated resources
+// to the main object in an exception free manner.  This way, the main code
+// for the initialization can be exception safe, and can provide the strong
+// exception guarantee.
+class Initializer {
+public:
+    ~Initializer() {
+        if (params_.q_zone_ != NULL) {
+            sqlite3_finalize(params_.q_zone_);
+        }
+        /*
+        if (params_.q_record_ != NULL) {
+            sqlite3_finalize(params_.q_record_);
+        }
+        if (params_.q_addrs_ != NULL) {
+            sqlite3_finalize(params_.q_addrs_);
+        }
+        if (params_.q_referral_ != NULL) {
+            sqlite3_finalize(params_.q_referral_);
+        }
+        if (params_.q_any_ != NULL) {
+            sqlite3_finalize(params_.q_any_);
+        }
+        if (params_.q_count_ != NULL) {
+            sqlite3_finalize(params_.q_count_);
+        }
+        if (params_.q_previous_ != NULL) {
+            sqlite3_finalize(params_.q_previous_);
+        }
+        if (params_.q_nsec3_ != NULL) {
+            sqlite3_finalize(params_.q_nsec3_);
+        }
+        if (params_.q_prevnsec3_ != NULL) {
+            sqlite3_finalize(params_.q_prevnsec3_);
+        }
+        */
+        if (params_.db_ != NULL) {
+            sqlite3_close(params_.db_);
+        }
+    }
+    void move(SQLite3Parameters* dst) {
+        *dst = params_;
+        params_ = SQLite3Parameters(); // clear everything
+    }
+    SQLite3Parameters params_;
+};
+
+const char* const SCHEMA_LIST[] = {
+    "CREATE TABLE schema_version (version INTEGER NOT NULL)",
+    "INSERT INTO schema_version VALUES (1)",
+    "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
+    "name STRING NOT NULL COLLATE NOCASE, "
+    "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+    "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+    "CREATE INDEX zones_byname ON zones (name)",
+    "CREATE TABLE records (id INTEGER PRIMARY KEY, "
+    "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+    "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+    "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+    "rdata STRING NOT NULL)",
+    "CREATE INDEX records_byname ON records (name)",
+    "CREATE INDEX records_byrname ON records (rname)",
+    "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
+    "hash STRING NOT NULL COLLATE NOCASE, "
+    "owner STRING NOT NULL COLLATE NOCASE, "
+    "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+    "rdata STRING NOT NULL)",
+    "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+    NULL
+};
+
+const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2";
+
+/* TODO: Prune the statements; not all of them may be needed.
+const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
+    "FROM records WHERE zone_id=?1 AND name=?2 AND "
+    "((rdtype=?3 OR sigtype=?3) OR "
+    "(rdtype='CNAME' OR sigtype='CNAME') OR "
+    "(rdtype='NS' OR sigtype='NS'))";
+
+const char* const q_addrs_str = "SELECT rdtype, ttl, sigtype, rdata "
+    "FROM records WHERE zone_id=?1 AND name=?2 AND "
+    "(rdtype='A' OR sigtype='A' OR rdtype='AAAA' OR sigtype='AAAA')";
+
+const char* const q_referral_str = "SELECT rdtype, ttl, sigtype, rdata FROM "
+    "records WHERE zone_id=?1 AND name=?2 AND"
+    "(rdtype='NS' OR sigtype='NS' OR rdtype='DS' OR sigtype='DS' OR "
+    "rdtype='DNAME' OR sigtype='DNAME')";
+
+const char* const q_any_str = "SELECT rdtype, ttl, sigtype, rdata "
+    "FROM records WHERE zone_id=?1 AND name=?2";
+
+const char* const q_count_str = "SELECT COUNT(*) FROM records "
+    "WHERE zone_id=?1 AND rname LIKE (?2 || '%');";
+
+const char* const q_previous_str = "SELECT name FROM records "
+    "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+    "rname < $2 ORDER BY rname DESC LIMIT 1";
+
+const char* const q_nsec3_str = "SELECT rdtype, ttl, rdata FROM nsec3 "
+    "WHERE zone_id = ?1 AND hash = $2";
+
+const char* const q_prevnsec3_str = "SELECT hash FROM nsec3 "
+    "WHERE zone_id = ?1 AND hash <= $2 ORDER BY hash DESC LIMIT 1";
+    */
+
+sqlite3_stmt*
+prepare(sqlite3* const db, const char* const statement) {
+    sqlite3_stmt* prepared = NULL;
+    if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
+                  statement);
+    }
+    return (prepared);
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+    sqlite3* const db = initializer->params_.db_;
+
+    sqlite3_stmt* prepared = NULL;
+    if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
+                           &prepared, NULL) == SQLITE_OK &&
+        sqlite3_step(prepared) == SQLITE_ROW) {
+        initializer->params_.version_ = sqlite3_column_int(prepared, 0);
+        sqlite3_finalize(prepared);
+    } else {
+        logger.info(DATASRC_SQLITE_SETUP);
+        if (prepared != NULL) {
+            sqlite3_finalize(prepared);
+        }
+        for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
+            if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
+                SQLITE_OK) {
+                isc_throw(SQLite3Error,
+                          "Failed to set up schema " << SCHEMA_LIST[i]);
+            }
+        }
+    }
+
+    initializer->params_.q_zone_ = prepare(db, q_zone_str);
+    /* TODO: Yet unneeded statements
+    initializer->params_.q_record_ = prepare(db, q_record_str);
+    initializer->params_.q_addrs_ = prepare(db, q_addrs_str);
+    initializer->params_.q_referral_ = prepare(db, q_referral_str);
+    initializer->params_.q_any_ = prepare(db, q_any_str);
+    initializer->params_.q_count_ = prepare(db, q_count_str);
+    initializer->params_.q_previous_ = prepare(db, q_previous_str);
+    initializer->params_.q_nsec3_ = prepare(db, q_nsec3_str);
+    initializer->params_.q_prevnsec3_ = prepare(db, q_prevnsec3_str);
+    */
+}
+
+}
+
+void
+SQLite3Database::open(const std::string& name) {
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
+    if (dbparameters_->db_ != NULL) {
+        // There shouldn't be a way to trigger this anyway
+        isc_throw(DataSourceError, "Duplicate SQLite open with " << name);
+    }
+
+    Initializer initializer;
+
+    if (sqlite3_open(name.c_str(), &initializer.params_.db_) != 0) {
+        isc_throw(SQLite3Error, "Cannot open SQLite database file: " << name);
+    }
+
+    checkAndSetupSchema(&initializer);
+    initializer.move(dbparameters_);
+}
+
+SQLite3Database::~SQLite3Database() {
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
+    if (dbparameters_->db_ != NULL) {
+        close();
+    }
+    delete dbparameters_;
+}
+
+void
+SQLite3Database::close(void) {
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
+    if (dbparameters_->db_ == NULL) {
+        isc_throw(DataSourceError,
+                  "SQLite data source is being closed before open");
+    }
+
+    // XXX: sqlite3_finalize() could fail.  What should we do in that case?
+    sqlite3_finalize(dbparameters_->q_zone_);
+    dbparameters_->q_zone_ = NULL;
+
+    /* TODO: Once they are needed or not, uncomment or drop
+    sqlite3_finalize(dbparameters->q_record_);
+    dbparameters->q_record_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_addrs_);
+    dbparameters->q_addrs_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_referral_);
+    dbparameters->q_referral_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_any_);
+    dbparameters->q_any_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_count_);
+    dbparameters->q_count_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_previous_);
+    dbparameters->q_previous_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_prevnsec3_);
+    dbparameters->q_prevnsec3_ = NULL;
+
+    sqlite3_finalize(dbparameters->q_nsec3_);
+    dbparameters->q_nsec3_ = NULL;
+    */
+
+    sqlite3_close(dbparameters_->db_);
+    dbparameters_->db_ = NULL;
+}
+
+std::pair<bool, int>
+SQLite3Database::getZone(const isc::dns::Name& name) const {
+    int rc;
+
+    // Take the prepared statement (simple SELECT id FROM zones WHERE...)
+    // and bind the parameters to it
+    sqlite3_reset(dbparameters_->q_zone_);
+    rc = sqlite3_bind_text(dbparameters_->q_zone_, 1, name.toText().c_str(),
+                           -1, SQLITE_STATIC);
+    if (rc != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not bind " << name <<
+                  " to SQL statement (zone)");
+    }
+    rc = sqlite3_bind_text(dbparameters_->q_zone_, 2, class_.c_str(), -1,
+                           SQLITE_STATIC);
+    if (rc != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not bind " << class_ <<
+                  " to SQL statement (zone)");
+    }
+
+    // Get the data there and see if it found anything
+    rc = sqlite3_step(dbparameters_->q_zone_);
+    std::pair<bool, int> result;
+    if (rc == SQLITE_ROW) {
+        result = std::pair<bool, int>(true,
+                                      sqlite3_column_int(dbparameters_->
+                                                         q_zone_, 0));
+    } else {
+        result = std::pair<bool, int>(false, 0);
+    }
+    // Reset the statement so it releases the resources tied to this query
+    sqlite3_reset(dbparameters_->q_zone_);
+
+    return (result);
+}
+
+}
+}
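
The getZone() implementation above follows the standard sqlite3
prepared-statement lifecycle: the statement is prepared once (in
checkAndSetupSchema()), each query does reset/bind/step/reset, and close()
finalizes it. The following standalone sketch shows the same pattern against
a throw-away in-memory database using only the sqlite3 C API; it is an
illustration, not part of the patch, and error handling is abbreviated:

    // Standalone illustration of the prepare/bind/step/reset pattern
    // (compile with e.g. "g++ demo.cc -lsqlite3"; error checks abbreviated).
    #include <sqlite3.h>
    #include <iostream>
    #include <string>

    int main() {
        sqlite3* db = NULL;
        sqlite3_open(":memory:", &db);      // throw-away in-memory database

        // A cut-down "zones" table, mirroring the schema above.
        sqlite3_exec(db, "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
                         "name STRING NOT NULL COLLATE NOCASE, "
                         "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN')",
                     NULL, NULL, NULL);
        sqlite3_exec(db, "INSERT INTO zones (name, rdclass) "
                         "VALUES ('example.com.', 'IN')", NULL, NULL, NULL);

        // Prepared once, like q_zone_ in checkAndSetupSchema().
        sqlite3_stmt* q_zone = NULL;
        sqlite3_prepare_v2(db,
                           "SELECT id FROM zones WHERE name=?1 AND rdclass=?2",
                           -1, &q_zone, NULL);

        // Per-query part, like getZone(): reset, bind, step, reset again.
        const std::string name = "example.com.";
        const std::string rrclass = "IN";
        sqlite3_reset(q_zone);
        sqlite3_bind_text(q_zone, 1, name.c_str(), -1, SQLITE_STATIC);
        sqlite3_bind_text(q_zone, 2, rrclass.c_str(), -1, SQLITE_STATIC);
        if (sqlite3_step(q_zone) == SQLITE_ROW) {
            std::cout << "zone id: " << sqlite3_column_int(q_zone, 0)
                      << std::endl;
        } else {
            std::cout << "no such zone" << std::endl;
        }
        sqlite3_reset(q_zone);

        // Cleanup, like close().
        sqlite3_finalize(q_zone);
        sqlite3_close(db);
        return (0);
    }
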
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
new file mode 100644
index 0000000..0d7ddee
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -0,0 +1,105 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __DATASRC_SQLITE3_ACCESSOR_H
+#define __DATASRC_SQLITE3_ACCESSOR_H
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+
+#include <string>
+
+namespace isc {
+namespace dns {
+class RRClass;
+}
+
+namespace datasrc {
+
+/**
+ * \brief Low-level database error
+ *
+ * This exception is thrown when the SQLite library complains about something.
+ * It might mean a corrupt database file, an invalid request, or that
+ * something is rotten in the library.
+ */
+class SQLite3Error : public Exception {
+public:
+    SQLite3Error(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
+struct SQLite3Parameters;
+
+/**
+ * \brief Concrete implementation of DatabaseAccessor for SQLite3 databases
+ *
+ * This opens one database file with our schema and serves data from there.
+ * According to the design, it doesn't interpret the data in any way; it just
+ * provides unified access to the DB.
+ */
+class SQLite3Database : public DatabaseAccessor {
+public:
+    /**
+     * \brief Constructor
+     *
+     * This opens the database and becomes ready to serve data from there.
+     *
+     * \exception SQLite3Error will be thrown if the given database file
+     * cannot be used (it is broken, or it doesn't exist and can't be
+     * created, etc).
+     *
+     * \param filename The database file to be used.
+     * \param rrclass Which class of data it should serve (while the database
+     *     file can contain multiple classes of data, a single accessor can
+     *     provide only one class).
+     */
+    SQLite3Database(const std::string& filename,
+                    const isc::dns::RRClass& rrclass);
+    /**
+     * \brief Destructor
+     *
+     * Closes the database.
+     */
+    ~SQLite3Database();
+    /**
+     * \brief Look up a zone
+     *
+     * This implements the getZone from DatabaseAccessor and looks up a zone
+     * in the data. It looks for a zone whose origin is exactly the given
+     * name and whose class is the one passed to the constructor.
+     *
+     * \exception SQLite3Error if something about the database is broken.
+     *
+     * \param name The name of the zone to look up
+     * \return A pair whose first element indicates whether the lookup was
+     *     successful, and whose second element is the zone ID if it was.
+     */
+    virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const;
+private:
+    /// \brief Private database data
+    SQLite3Parameters* dbparameters_;
+    /// \brief The class for which the queries are done
+    const std::string class_;
+    /// \brief Opens the database
+    void open(const std::string& filename);
+    /// \brief Closes the database
+    void close();
+};
+
+}
+}
+
+#endif
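
A minimal usage sketch for SQLite3Database as declared above, assuming the
usual BIND 10 build environment; the database path is made up and only
illustrates where SQLite3Error would surface:

    // Sketch: direct use of SQLite3Database (the path below is hypothetical).
    #include <datasrc/sqlite3_accessor.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <iostream>
    #include <utility>

    using namespace isc::datasrc;
    using isc::dns::Name;
    using isc::dns::RRClass;

    int main() {
        try {
            // Throws SQLite3Error if the file is broken or can't be created.
            SQLite3Database db("/var/lib/bind10/zone.sqlite3", RRClass::IN());

            // getZone() matches only the exact origin of a stored zone.
            const std::pair<bool, int> zone = db.getZone(Name("example.com"));
            if (zone.first) {
                std::cout << "zone id: " << zone.second << std::endl;
            } else {
                std::cout << "zone not in this database" << std::endl;
            }
        } catch (const SQLite3Error& ex) {
            std::cerr << "database problem: " << ex.what() << std::endl;
            return (1);
        }
        return (0);
    }
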
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index fbcf9c9..1a65f82 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -28,6 +28,8 @@ run_unittests_SOURCES += rbtree_unittest.cc
 run_unittests_SOURCES += zonetable_unittest.cc
 run_unittests_SOURCES += memory_datasrc_unittest.cc
 run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += database_unittest.cc
+run_unittests_SOURCES += sqlite3_accessor_unittest.cc
 
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
@@ -36,6 +38,7 @@ run_unittests_LDADD  = $(GTEST_LDADD)
 run_unittests_LDADD += $(SQLITE_LIBS)
 run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
new file mode 100644
index 0000000..4144a5b
--- /dev/null
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -0,0 +1,99 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+#include <exceptions/exceptions.h>
+
+#include <datasrc/database.h>
+
+using namespace isc::datasrc;
+using namespace std;
+using namespace boost;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * A virtual database connection that pretends it contains a single zone --
+ * example.org.
+ */
+class MockAccessor : public DatabaseAccessor {
+public:
+    virtual std::pair<bool, int> getZone(const Name& name) const {
+        if (name == Name("example.org")) {
+            return (std::pair<bool, int>(true, 42));
+        } else {
+            return (std::pair<bool, int>(false, 0));
+        }
+    }
+};
+
+class DatabaseClientTest : public ::testing::Test {
+public:
+    DatabaseClientTest() {
+        createClient();
+    }
+    /*
+     * We initialize the client from a function, so we can call it multiple
+     * times per test.
+     */
+    void createClient() {
+        current_database_ = new MockAccessor();
+        client_.reset(new DatabaseClient(shared_ptr<DatabaseAccessor>(
+             current_database_)));
+    }
+    // Will be deleted by client_, just keep the current value for comparison.
+    MockAccessor* current_database_;
+    shared_ptr<DatabaseClient> client_;
+    /**
+     * Check the zone finder is a valid one and references the zone ID and
+     * database available here.
+     */
+    void checkZoneFinder(const DataSourceClient::FindResult& zone) {
+        ASSERT_NE(ZoneFinderPtr(), zone.zone_finder) << "No zone finder";
+        shared_ptr<DatabaseClient::Finder> finder(
+            dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+        ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
+            "Wrong type of finder";
+        EXPECT_EQ(42, finder->zone_id());
+        EXPECT_EQ(current_database_, &finder->database());
+    }
+};
+
+TEST_F(DatabaseClientTest, zoneNotFound) {
+    DataSourceClient::FindResult zone(client_->findZone(Name("example.com")));
+    EXPECT_EQ(result::NOTFOUND, zone.code);
+}
+
+TEST_F(DatabaseClientTest, exactZone) {
+    DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
+    EXPECT_EQ(result::SUCCESS, zone.code);
+    checkZoneFinder(zone);
+}
+
+TEST_F(DatabaseClientTest, superZone) {
+    DataSourceClient::FindResult zone(client_->findZone(Name(
+        "sub.example.org")));
+    EXPECT_EQ(result::PARTIALMATCH, zone.code);
+    checkZoneFinder(zone);
+}
+
+TEST_F(DatabaseClientTest, noAccessorException) {
+    EXPECT_THROW(DatabaseClient(shared_ptr<DatabaseAccessor>()),
+                 isc::InvalidParameter);
+}
+
+}
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
new file mode 100644
index 0000000..409201d
--- /dev/null
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using isc::data::ConstElementPtr;
+using isc::data::Element;
+using isc::dns::RRClass;
+using isc::dns::Name;
+
+namespace {
+// Some test data
+std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
+std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+std::string SQLITE_DBFILE_MEMORY = ":memory:";
+
+// The following file must be non-existent and must be non-"creatable";
+// the sqlite3 library will try to create a new DB file if it doesn't exist,
+// so to test a failure case the create operation should also fail.
+// The non-existent directory "nodir" is inserted in the path for this purpose.
+std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+
+// Opening works (the content is tested in different tests)
+TEST(SQLite3Open, common) {
+    EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_EXAMPLE,
+                                       RRClass::IN()));
+}
+
+// The file can't be opened
+TEST(SQLite3Open, notExist) {
+    EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_NOTEXIST,
+                                    RRClass::IN()), SQLite3Error);
+}
+
+// It rejects a broken DB
+TEST(SQLite3Open, brokenDB) {
+    EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_BROKENDB,
+                                    RRClass::IN()), SQLite3Error);
+}
+
+// Test we can create the schema on the fly
+TEST(SQLite3Open, memoryDB) {
+    EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_MEMORY,
+                                       RRClass::IN()));
+}
+
+// Test fixture for querying the db
+class SQLite3Access : public ::testing::Test {
+public:
+    SQLite3Access() {
+        initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::IN());
+    }
+    // So it can be re-created with different data
+    void initAccessor(const std::string& filename, const RRClass& rrclass) {
+        db.reset(new SQLite3Database(filename, rrclass));
+    }
+    // The tested db
+    std::auto_ptr<SQLite3Database> db;
+};
+
+// This zone exists in the data, so it should be found
+TEST_F(SQLite3Access, getZone) {
+    std::pair<bool, int> result(db->getZone(Name("example.com")));
+    EXPECT_TRUE(result.first);
+    EXPECT_EQ(1, result.second);
+}
+
+// But it should find only the zone, nothing below it
+TEST_F(SQLite3Access, subZone) {
+    EXPECT_FALSE(db->getZone(Name("sub.example.com")).first);
+}
+
+// This zone is not there at all
+TEST_F(SQLite3Access, noZone) {
+    EXPECT_FALSE(db->getZone(Name("example.org")).first);
+}
+
+// This zone is there, but in a different class
+TEST_F(SQLite3Access, noClass) {
+    initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::CH());
+    EXPECT_FALSE(db->getZone(Name("example.com")).first);
+}
+
+}
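
The SQLite3Open tests above lean on two sqlite3 behaviors: opening ":memory:"
always yields a fresh, empty database (so the schema-setup path is
exercised), while opening a file in a non-existent directory fails because
sqlite3 will create a missing file but not a missing directory. A standalone
sketch of just those two behaviors (not part of the patch):

    // Standalone check of the two sqlite3_open() behaviors
    // (compile with "g++ demo.cc -lsqlite3").
    #include <sqlite3.h>
    #include <iostream>

    int main() {
        sqlite3* db = NULL;

        // Always succeeds: a brand new, empty in-memory database.
        std::cout << "':memory:' open: "
                  << (sqlite3_open(":memory:", &db) == SQLITE_OK ?
                      "ok" : "failed") << std::endl;
        sqlite3_close(db);

        // Fails: sqlite3 would create a missing file, but not a missing
        // directory, so the open cannot succeed.
        db = NULL;
        std::cout << "missing-directory open: "
                  << (sqlite3_open("/nodir/notexist", &db) == SQLITE_OK ?
                      "ok" : "failed") << std::endl;
        sqlite3_close(db);      // safe to call even after a failed open
        return (0);
    }
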
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 69785f0..f67ed4b 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -131,10 +131,10 @@ public:
     /// These methods should never throw an exception.
     //@{
     /// Return the origin name of the zone.
-    virtual const isc::dns::Name& getOrigin() const = 0;
+    virtual isc::dns::Name getOrigin() const = 0;
 
     /// Return the RR class of the zone.
-    virtual const isc::dns::RRClass& getClass() const = 0;
+    virtual isc::dns::RRClass getClass() const = 0;
     //@}
 
     ///
diff --git a/src/lib/dns/benchmarks/Makefile.am b/src/lib/dns/benchmarks/Makefile.am
index 8645385..0d7856f 100644
--- a/src/lib/dns/benchmarks/Makefile.am
+++ b/src/lib/dns/benchmarks/Makefile.am
@@ -13,5 +13,6 @@ noinst_PROGRAMS = rdatarender_bench
 rdatarender_bench_SOURCES = rdatarender_bench.cc
 
 rdatarender_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+rdatarender_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
 rdatarender_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 rdatarender_bench_LDADD += $(SQLITE_LIBS)
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 2557965..04a4dc4 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -19,6 +19,7 @@
 #include <boost/lexical_cast.hpp>
 
 #include <util/buffer.h>
+#include <util/strutil.h>
 #include <util/encode/base64.h>
 
 #include <dns/messagerenderer.h>
@@ -30,6 +31,7 @@ using namespace std;
 using namespace boost;
 using namespace isc::util;
 using namespace isc::util::encode;
+using namespace isc::util::str;
 
 // BEGIN_ISC_NAMESPACE
 // BEGIN_RDATA_NAMESPACE
@@ -65,45 +67,6 @@ struct TSIG::TSIGImpl {
     const vector<uint8_t> other_data_;
 };
 
-namespace {
-string
-getToken(istringstream& iss, const string& full_input) {
-    string token;
-    iss >> token;
-    if (iss.bad() || iss.fail()) {
-        isc_throw(InvalidRdataText, "Invalid TSIG text: parse error " <<
-                  full_input);
-    }
-    return (token);
-}
-
-// This helper function converts a string token to an *unsigned* integer.
-// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
-// wide to store resulting integers.
-// BitSize is the maximum number of bits that the resulting integer can take.
-// This function first checks whether the given token can be converted to
-// an integer of NumType type.  It then confirms the conversion result is
-// within the valid range, i.e., [0, 2^NumType - 1].  The second check is
-// necessary because lexical_cast<T> where T is an unsigned integer type
-// doesn't correctly reject negative numbers when compiled with SunStudio.
-template <typename NumType, int BitSize>
-NumType
-tokenToNum(const string& num_token) {
-    NumType num;
-    try {
-        num = lexical_cast<NumType>(num_token);
-    } catch (const boost::bad_lexical_cast& ex) {
-        isc_throw(InvalidRdataText, "Invalid TSIG numeric parameter: " <<
-                  num_token);
-    }
-    if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
-        isc_throw(InvalidRdataText, "Numeric TSIG parameter out of range: " <<
-                  num);
-    }
-    return (num);
-}
-}
-
 /// \brief Constructor from string.
 ///
 /// \c tsig_str must be formatted as follows:
@@ -148,47 +111,52 @@ tokenToNum(const string& num_token) {
 TSIG::TSIG(const std::string& tsig_str) : impl_(NULL) {
     istringstream iss(tsig_str);
 
-    const Name algorithm(getToken(iss, tsig_str));
-    const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss,
-                                                                 tsig_str));
-    const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-    const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
-    const string mac_txt = (macsize > 0) ? getToken(iss, tsig_str) : "";
-    vector<uint8_t> mac;
-    decodeBase64(mac_txt, mac);
-    if (mac.size() != macsize) {
-        isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
-    }
-
-    const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
-    const string error_txt = getToken(iss, tsig_str);
-    int32_t error = 0;
-    // XXX: In the initial implementation we hardcode the mnemonics.
-    // We'll soon generalize this.
-    if (error_txt == "BADSIG") {
-        error = 16;
-    } else if (error_txt == "BADKEY") {
-        error = 17;
-    } else if (error_txt == "BADTIME") {
-        error = 18;
-    } else {
-        error = tokenToNum<int32_t, 16>(error_txt);
-    }
-
-    const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-    const string otherdata_txt = (otherlen > 0) ? getToken(iss, tsig_str) : "";
-    vector<uint8_t> other_data;
-    decodeBase64(otherdata_txt, other_data);
-
-    if (!iss.eof()) {
-        isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
-                  tsig_str);
+    try {
+        const Name algorithm(getToken(iss));
+        const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss));
+        const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss));
+        const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss));
+
+        const string mac_txt = (macsize > 0) ? getToken(iss) : "";
+        vector<uint8_t> mac;
+        decodeBase64(mac_txt, mac);
+        if (mac.size() != macsize) {
+            isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
+        }
+
+        const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss));
+
+        const string error_txt = getToken(iss);
+        int32_t error = 0;
+        // XXX: In the initial implementation we hardcode the mnemonics.
+        // We'll soon generalize this.
+        if (error_txt == "BADSIG") {
+            error = 16;
+        } else if (error_txt == "BADKEY") {
+            error = 17;
+        } else if (error_txt == "BADTIME") {
+            error = 18;
+        } else {
+            error = tokenToNum<int32_t, 16>(error_txt);
+        }
+
+        const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss));
+        const string otherdata_txt = (otherlen > 0) ? getToken(iss) : "";
+        vector<uint8_t> other_data;
+        decodeBase64(otherdata_txt, other_data);
+
+        if (!iss.eof()) {
+            isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
+                    tsig_str);
+        }
+
+        impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
+                            error, other_data);
+
+    } catch (const StringTokenError& ste) {
+        isc_throw(InvalidRdataText, "Invalid TSIG text: " << ste.what() <<
+                  ": " << tsig_str);
     }
-
-    impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
-                         error, other_data);
 }
 
 /// \brief Constructor from wire-format data.
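
For reference, a sketch of the refactored TSIG text constructor in use,
assuming the usual BIND 10 build environment; the field values are
illustrative. Tokenizing and range errors from the shared isc::util::str
helpers now surface as InvalidRdataText:

    // Sketch: the refactored text constructor in use (values illustrative).
    #include <dns/rdata.h>
    #include <dns/rdataclass.h>
    #include <iostream>

    using namespace isc::dns::rdata;

    int main() {
        // algorithm, time signed, fudge, MAC size (0, so no MAC field),
        // original ID, error (mnemonic or number), other-data length.
        const any::TSIG tsig(
            "hmac-md5.sig-alg.reg.int. 1286779327 300 0 16020 BADKEY 0");
        std::cout << tsig.toText() << std::endl;

        try {
            // Truncated input: getToken() throws StringTokenError, which the
            // constructor rethrows as InvalidRdataText.
            const any::TSIG bad("hmac-md5.sig-alg.reg.int. 1286779327");
        } catch (const InvalidRdataText& ex) {
            std::cout << "rejected: " << ex.what() << std::endl;
        }
        return (0);
    }
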
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
index c28c435..93b5d4d 100644
--- a/src/lib/dns/rdata/in_1/srv_33.cc
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -18,6 +18,7 @@
 #include <boost/lexical_cast.hpp>
 
 #include <util/buffer.h>
+#include <util/strutil.h>
 
 #include <dns/messagerenderer.h>
 #include <dns/name.h>
@@ -26,6 +27,7 @@
 
 using namespace std;
 using namespace isc::util;
+using namespace isc::util::str;
 
 // BEGIN_ISC_NAMESPACE
 // BEGIN_RDATA_NAMESPACE
@@ -44,39 +46,6 @@ struct SRVImpl {
     Name target_;
 };
 
-namespace {
-string
-getToken(istringstream& iss, const string& full_input) {
-    string token;
-    iss >> token;
-    if (iss.bad() || iss.fail()) {
-        isc_throw(InvalidRdataText, "Invalid SRV text: parse error " <<
-                  full_input);
-    }
-    return (token);
-}
-
-// This helper function converts a string token to an *unsigned* integer.
-// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
-// wide to store resulting integers.
-template <typename NumType, int BitSize>
-NumType
-tokenToNum(const string& num_token) {
-    NumType num;
-    try {
-        num = boost::lexical_cast<NumType>(num_token);
-    } catch (const boost::bad_lexical_cast& ex) {
-        isc_throw(InvalidRdataText, "Invalid SRV numeric parameter: " <<
-                  num_token);
-    }
-    if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
-        isc_throw(InvalidRdataText, "Numeric SRV parameter out of range: " <<
-                  num);
-    }
-    return (num);
-}
-}
-
 /// \brief Constructor from string.
 ///
 /// \c srv_str must be formatted as follows:
@@ -103,17 +72,22 @@ SRV::SRV(const string& srv_str) :
 {
     istringstream iss(srv_str);
 
-    const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss, srv_str));
-    const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss, srv_str));
-    const int32_t port = tokenToNum<int32_t, 16>(getToken(iss, srv_str));
-    const Name targetname(getToken(iss, srv_str));
-
-    if (!iss.eof()) {
-        isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
-                  srv_str);
+    try {
+        const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss));
+        const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss));
+        const int32_t port = tokenToNum<int32_t, 16>(getToken(iss));
+        const Name targetname(getToken(iss));
+
+        if (!iss.eof()) {
+            isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
+                    srv_str);
+        }
+
+        impl_ = new SRVImpl(priority, weight, port, targetname);
+    } catch (const StringTokenError& ste) {
+        isc_throw(InvalidRdataText, "Invalid SRV text: " <<
+                  ste.what() << ": " << srv_str);
     }
-
-    impl_ = new SRVImpl(priority, weight, port, targetname);
 }
 
 /// \brief Constructor from wire-format data.
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index 60735e9..743b5d2 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -49,8 +49,7 @@ BUILT_SOURCES += tsig_verify10.wire
 
 # NOTE: keep this in sync with real file listing
 # so is included in tarball
-EXTRA_DIST = gen-wiredata.py.in
-EXTRA_DIST += edns_toWire1.spec edns_toWire2.spec
+EXTRA_DIST = edns_toWire1.spec edns_toWire2.spec
 EXTRA_DIST += edns_toWire3.spec edns_toWire4.spec
 EXTRA_DIST += masterload.txt
 EXTRA_DIST += message_fromWire1 message_fromWire2
@@ -123,4 +122,4 @@ EXTRA_DIST += tsig_verify7.spec tsig_verify8.spec tsig_verify9.spec
 EXTRA_DIST += tsig_verify10.spec
 
 .spec.wire:
-	./gen-wiredata.py -o $@ $<
+	$(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/dns/tests/testdata/gen-wiredata.py.in b/src/lib/dns/tests/testdata/gen-wiredata.py.in
deleted file mode 100755
index 818c6e9..0000000
--- a/src/lib/dns/tests/testdata/gen-wiredata.py.in
+++ /dev/null
@@ -1,610 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import configparser, re, time, socket, sys
-from datetime import datetime
-from optparse import OptionParser
-
-re_hex = re.compile(r'^0x[0-9a-fA-F]+')
-re_decimal = re.compile(r'^\d+$')
-re_string = re.compile(r"\'(.*)\'$")
-
-dnssec_timefmt = '%Y%m%d%H%M%S'
-
-dict_qr = { 'query' : 0, 'response' : 1 }
-dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
-                'update' : 5 }
-rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
-dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
-               'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
-               'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
-rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
-dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
-                'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
-                'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
-                'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
-                'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
-                'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
-                'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
-                'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
-                'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
-                'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
-                'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
-                'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
-                'maila' : 254, 'any' : 255 }
-rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
-dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
-rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
-                          dict_rrclass.keys()])
-dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
-                   'rsasha1' : 5 }
-dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
-rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
-                            dict_algorithm.keys()])
-rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
-                                  dict_nsec3_algorithm.keys()])
-
-header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
-                   'rcode' : dict_rcode }
-question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
-rrsig_xtables = { 'algorithm' : dict_algorithm }
-
-def parse_value(value, xtable = {}):
-    if re.search(re_hex, value):
-        return int(value, 16)
-    if re.search(re_decimal, value):
-        return int(value)
-    m = re.match(re_string, value)
-    if m:
-        return m.group(1)
-    lovalue = value.lower()
-    if lovalue in xtable:
-        return xtable[lovalue]
-    return value
-
-def code_totext(code, dict):
-    if code in dict.keys():
-        return dict[code] + '(' + str(code) + ')'
-    return str(code)
-
-def encode_name(name, absolute=True):
-    # make sure the name is dot-terminated.  duplicate dots will be ignored
-    # below.
-    name += '.'
-    labels = name.split('.')
-    wire = ''
-    for l in labels:
-        if len(l) > 4 and l[0:4] == 'ptr=':
-            # special meta-syntax for compression pointer
-            wire += '%04x' % (0xc000 | int(l[4:]))
-            break
-        if absolute or len(l) > 0:
-            wire += '%02x' % len(l)
-            wire += ''.join(['%02x' % ord(ch) for ch in l])
-        if len(l) == 0:
-            break
-    return wire
-
-def encode_string(name, len=None):
-    if type(name) is int and len is not None:
-        return '%0.*x' % (len * 2, name)
-    return ''.join(['%02x' % ord(ch) for ch in name])
-
-def count_namelabels(name):
-    if name == '.':             # special case
-        return 0
-    m = re.match('^(.*)\.$', name)
-    if m:
-        name = m.group(1)
-    return len(name.split('.'))
-
-def get_config(config, section, configobj, xtables = {}):
-    try:
-        for field in config.options(section):
-            value = config.get(section, field)
-            if field in xtables.keys():
-                xtable = xtables[field]
-            else:
-                xtable = {}
-            configobj.__dict__[field] = parse_value(value, xtable)
-    except configparser.NoSectionError:
-        return False
-    return True
-
-def print_header(f, input_file):
-    f.write('''###
-### This data file was auto-generated from ''' + input_file + '''
-###
-''')
-
-class Name:
-    name = 'example.com'
-    pointer = None                # no compression by default
-    def dump(self, f):
-        name = self.name
-        if self.pointer is not None:
-            if len(name) > 0 and name[-1] != '.':
-                name += '.'
-            name += 'ptr=%d' % self.pointer
-        name_wire = encode_name(name)
-        f.write('\n# DNS Name: %s' % self.name)
-        if self.pointer is not None:
-            f.write(' + compression pointer: %d' % self.pointer)
-        f.write('\n')
-        f.write('%s' % name_wire)
-        f.write('\n')
-
-class DNSHeader:
-    id = 0x1035
-    (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
-    mbz = 0
-    rcode = 0                   # noerror
-    opcode = 0                  # query
-    (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
-    def dump(self, f):
-        f.write('\n# Header Section\n')
-        f.write('# ID=' + str(self.id))
-        f.write(' QR=' + ('Response' if self.qr else 'Query'))
-        f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
-        f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
-        f.write('%s' % (' AA' if self.aa else ''))
-        f.write('%s' % (' TC' if self.tc else ''))
-        f.write('%s' % (' RD' if self.rd else ''))
-        f.write('%s' % (' AD' if self.ad else ''))
-        f.write('%s' % (' CD' if self.cd else ''))
-        f.write('\n')
-        f.write('%04x ' % self.id)
-        flag_and_code = 0
-        flag_and_code |= (self.qr << 15 | self.opcode << 14 | self.aa << 10 |
-                          self.tc << 9 | self.rd << 8 | self.ra << 7 |
-                          self.mbz << 6 | self.ad << 5 | self.cd << 4 |
-                          self.rcode)
-        f.write('%04x\n' % flag_and_code)
-        f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
-                (self.qdcount, self.ancount, self.nscount, self.arcount))
-        f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
-                                           self.nscount, self.arcount))
-
-class DNSQuestion:
-    name = 'example.com.'
-    rrtype = parse_value('A', dict_rrtype)
-    rrclass = parse_value('IN', dict_rrclass)
-    def dump(self, f):
-        f.write('\n# Question Section\n')
-        f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
-                (self.name,
-                 code_totext(self.rrtype, rdict_rrtype),
-                 code_totext(self.rrclass, rdict_rrclass)))
-        f.write(encode_name(self.name))
-        f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
-
-class EDNS:
-    name = '.'
-    udpsize = 4096
-    extrcode = 0
-    version = 0
-    do = 0
-    mbz = 0
-    rdlen = 0
-    def dump(self, f):
-        f.write('\n# EDNS OPT RR\n')
-        f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
-                (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
-                 self.udpsize, self.extrcode, self.version,
-                 1 if self.do else 0))
-        
-        code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
-        extflags = (self.do << 15) | (self.mbz & 0x8000)
-        f.write('%s %04x %04x %04x %04x\n' %
-                (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
-                 code_vers, extflags))
-        f.write('# RDLEN=%d\n' % self.rdlen)
-        f.write('%04x\n' % self.rdlen)
-
-class RR:
-    '''This is a base class for various types of RR test data.
-    For each RR type (A, AAAA, NS, etc), we define a derived class of RR
-    to dump type specific RDATA parameters.  This class defines parameters
-    common to all types of RDATA, namely the owner name, RR class and TTL.
-    The dump() method of derived classes are expected to call dump_header(),
-    whose default implementation is provided in this class.  This method
-    decides whether to dump the test data as an RR (with name, type, class)
-    or only as RDATA (with its length), and dumps the corresponding data
-    via the specified file object.
-
-    By convention we assume derived classes are named after the common
-    standard mnemonic of the corresponding RR types.  For example, the
-    derived class for the RR type SOA should be named "SOA".
-
-    Configurable parameters are as follows:
-    - as_rr (bool): Whether or not the data is to be dumped as an RR.  False
-      by default.
-    - rr_class (string): The RR class of the data.  Only meaningful when the
-      data is dumped as an RR.  Default is 'IN'.
-    - rr_ttl (integer): The TTL value of the RR.  Only meaningful when the
-      data is dumped as an RR.  Default is 86400 (1 day).
-    '''
-
-    def __init__(self):
-        self.as_rr = False
-        # only when as_rr is True, same for class/TTL:
-        self.rr_name = 'example.com'
-        self.rr_class = 'IN'
-        self.rr_ttl = 86400
-    def dump_header(self, f, rdlen):
-        type_txt = self.__class__.__name__
-        type_code = parse_value(type_txt, dict_rrtype)
-        if self.as_rr:
-            rrclass = parse_value(self.rr_class, dict_rrclass)
-            f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d RDLEN=%d)\n' %
-                    (type_txt, self.rr_name,
-                     code_totext(rrclass, rdict_rrclass), self.rr_ttl, rdlen))
-            f.write('%s %04x %04x %08x %04x\n' %
-                    (encode_name(self.rr_name), type_code, rrclass,
-                     self.rr_ttl, rdlen))
-        else:
-            f.write('\n# %s RDATA (RDLEN=%d)\n' % (type_txt, rdlen))
-            f.write('%04x\n' % rdlen)
-
-class A(RR):
-    rdlen = 4                   # fixed by default
-    address = '192.0.2.1'
-
-    def dump(self, f):
-        self.dump_header(f, self.rdlen)
-        f.write('# Address=%s\n' % (self.address))
-        bin_address = socket.inet_aton(self.address)
-        f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
-                                        bin_address[2], bin_address[3]))
-
-class NS(RR):
-    rdlen = None                   # auto calculate
-    nsname = 'ns.example.com'
-
-    def dump(self, f):
-        nsname_wire = encode_name(self.nsname)
-        if self.rdlen is None:
-            self.rdlen = len(nsname_wire) / 2
-        self.dump_header(f, self.rdlen)
-        f.write('# NS name=%s\n' % (self.nsname))
-        f.write('%s\n' % nsname_wire)
-
-class SOA(RR):
-    rdlen = None                  # auto-calculate
-    mname = 'ns.example.com'
-    rname = 'root.example.com'
-    serial = 2010012601
-    refresh = 3600
-    retry = 300
-    expire = 3600000
-    minimum = 1200
-    def dump(self, f):
-        mname_wire = encode_name(self.mname)
-        rname_wire = encode_name(self.rname)
-        if self.rdlen is None:
-            self.rdlen = int(20 + len(mname_wire) / 2 + len(str(rname_wire)) / 2)
-        self.dump_header(f, self.rdlen)
-        f.write('# NNAME=%s RNAME=%s\n' % (self.mname, self.rname))
-        f.write('%s %s\n' % (mname_wire, rname_wire))
-        f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
-                (self.serial, self.refresh, self.retry, self.expire,
-                 self.minimum))
-        f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
-                                                self.retry, self.expire,
-                                                self.minimum))
-
-class TXT(RR):
-    rdlen = None                # auto-calculate
-    nstring = 1                 # number of character-strings
-    stringlen = -1              # default string length, auto-calculate
-    string = 'Test String'      # default string
-    def dump(self, f):
-        stringlen_list = []
-        string_list = []
-        wirestring_list = []
-        for i in range(0, self.nstring):
-            key_string = 'string' + str(i)
-            if key_string in self.__dict__:
-                string_list.append(self.__dict__[key_string])
-            else:
-                string_list.append(self.string)
-            wirestring_list.append(encode_string(string_list[-1]))
-            key_stringlen = 'stringlen' + str(i)
-            if key_stringlen in self.__dict__:
-                stringlen_list.append(self.__dict__[key_stringlen])
-            else:
-                stringlen_list.append(self.stringlen)
-            if stringlen_list[-1] < 0:
-                stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
-        if self.rdlen is None:
-            self.rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
-        self.dump_header(f, self.rdlen)
-        for i in range(0, self.nstring):
-            f.write('# String Len=%d, String=\"%s\"\n' %
-                    (stringlen_list[i], string_list[i]))
-            f.write('%02x%s%s\n' % (stringlen_list[i],
-                                    ' ' if len(wirestring_list[i]) > 0 else '',
-                                    wirestring_list[i]))
-
-class RP:
-    '''Implements rendering RP RDATA in the wire format.
-    Configurable parameters are as follows:
-    - rdlen: 16-bit RDATA length.  If omitted, the accurate value is auto
-      calculated and used; if negative, the RDLEN field will be omitted from
-      the output data.
-    - mailbox: The mailbox field.
-    - text: The text field.
-    All of these parameters have the default values and can be omitted.
-    '''
-    rdlen = None                # auto-calculate
-    mailbox = 'root.example.com'
-    text = 'rp-text.example.com'
-    def dump(self, f):
-        mailbox_wire = encode_name(self.mailbox)
-        text_wire = encode_name(self.text)
-        if self.rdlen is None:
-            self.rdlen = (len(mailbox_wire) + len(text_wire)) / 2
-        else:
-            self.rdlen = int(self.rdlen)
-        if self.rdlen >= 0:
-            f.write('\n# RP RDATA (RDLEN=%d)\n' % self.rdlen)
-            f.write('%04x\n' % self.rdlen)
-        else:
-            f.write('\n# RP RDATA (RDLEN omitted)\n')
-        f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
-        f.write('%s %s\n' % (mailbox_wire, text_wire))
-
-class NSECBASE:
-    '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
-    these RRs.  The NSEC and NSEC3 classes will be inherited from this
-    class.'''
-    nbitmap = 1                 # number of bitmaps
-    block = 0
-    maplen = None              # default bitmap length, auto-calculate
-    bitmap = '040000000003'     # an arbtrarily chosen bitmap sample
-    def dump(self, f):
-        # first, construct the bitmpa data
-        block_list = []
-        maplen_list = []
-        bitmap_list = []
-        for i in range(0, self.nbitmap):
-            key_bitmap = 'bitmap' + str(i)
-            if key_bitmap in self.__dict__:
-                bitmap_list.append(self.__dict__[key_bitmap])
-            else:
-                bitmap_list.append(self.bitmap)
-            key_maplen = 'maplen' + str(i)
-            if key_maplen in self.__dict__:
-                maplen_list.append(self.__dict__[key_maplen])
-            else:
-                maplen_list.append(self.maplen)
-            if maplen_list[-1] is None: # calculate it if not specified
-                maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
-            key_block = 'block' + str(i)
-            if key_block in self.__dict__:
-               block_list.append(self.__dict__[key_block])
-            else:
-                block_list.append(self.block)
-
-        # dump RR-type specific part (NSEC or NSEC3)
-        self.dump_fixedpart(f, 2 * self.nbitmap + \
-                                int(len(''.join(bitmap_list)) / 2))
-
-        # dump the bitmap
-        for i in range(0, self.nbitmap):
-            f.write('# Bitmap: Block=%d, Length=%d\n' %
-                    (block_list[i], maplen_list[i]))
-            f.write('%02x %02x %s\n' %
-                    (block_list[i], maplen_list[i], bitmap_list[i]))
-
-class NSEC(NSECBASE):
-    rdlen = None                # auto-calculate
-    nextname = 'next.example.com'
-    def dump_fixedpart(self, f, bitmap_totallen):
-        name_wire = encode_name(self.nextname)
-        if self.rdlen is None:
-            # if rdlen needs to be calculated, it must be based on the bitmap
-            # length, because the configured maplen can be fake.
-            self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
-        f.write('\n# NSEC RDATA (RDLEN=%d)\n' % self.rdlen)
-        f.write('%04x\n' % self.rdlen);
-        f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
-                                                 int(len(name_wire) / 2)))
-        f.write('%s\n' % name_wire)
-
-class NSEC3(NSECBASE):
-    rdlen = None                # auto-calculate
-    hashalg = 1                 # SHA-1
-    optout = False              # opt-out flag
-    mbz = 0                     # other flag fields (none defined yet)
-    iterations = 1
-    saltlen = 5
-    salt = 's' * saltlen
-    hashlen = 20
-    hash = 'h' * hashlen
-    def dump_fixedpart(self, f, bitmap_totallen):
-        if self.rdlen is None:
-            # if rdlen needs to be calculated, it must be based on the bitmap
-            # length, because the configured maplen can be fake.
-            self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
-                + bitmap_totallen
-        f.write('\n# NSEC3 RDATA (RDLEN=%d)\n' % self.rdlen)
-        f.write('%04x\n' % self.rdlen)
-        optout_val = 1 if self.optout else 0
-        f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
-                (code_totext(self.hashalg, rdict_nsec3_algorithm),
-                 optout_val, self.mbz, self.iterations))
-        f.write('%02x %02x %04x\n' %
-                (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
-        f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
-        f.write('%02x%s%s\n' % (self.saltlen,
-                                ' ' if len(self.salt) > 0 else '',
-                                encode_string(self.salt)))
-        f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
-        f.write('%02x%s%s\n' % (self.hashlen,
-                                ' ' if len(self.hash) > 0 else '',
-                                encode_string(self.hash)))
-
-class RRSIG:
-    rdlen = -1                  # auto-calculate
-    covered = 1                 # A
-    algorithm = 5               # RSA-SHA1
-    labels = -1                 # auto-calculate (#labels of signer)
-    originalttl = 3600
-    expiration = int(time.mktime(datetime.strptime('20100131120000',
-                                                   dnssec_timefmt).timetuple()))
-    inception = int(time.mktime(datetime.strptime('20100101120000',
-                                                  dnssec_timefmt).timetuple()))
-    tag = 0x1035
-    signer = 'example.com'
-    signature = 0x123456789abcdef123456789abcdef
-    def dump(self, f):
-        name_wire = encode_name(self.signer)
-        sig_wire = '%x' % self.signature 
-        rdlen = self.rdlen
-        if rdlen < 0:
-            rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
-        labels = self.labels
-        if labels < 0:
-            labels = count_namelabels(self.signer)
-        f.write('\n# RRSIG RDATA (RDLEN=%d)\n' % rdlen)
-        f.write('%04x\n' % rdlen);
-        f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
-                (code_totext(self.covered, rdict_rrtype),
-                 code_totext(self.algorithm, rdict_algorithm), labels,
-                 self.originalttl))
-        f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
-                                           labels, self.originalttl))
-        f.write('# Expiration=%s, Inception=%s\n' %
-                (str(self.expiration), str(self.inception)))
-        f.write('%08x %08x\n' % (self.expiration, self.inception))
-        f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
-        f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
-
-class TSIG(RR):
-    rdlen = None                # auto-calculate
-    algorithm = 'hmac-sha256'
-    time_signed = 1286978795    # arbitrarily chosen default
-    fudge = 300
-    mac_size = None             # use a common value for the algorithm
-    mac = None                  # use 'x' * mac_size
-    original_id = 2845          # arbitrarily chosen default
-    error = 0
-    other_len = None         # 6 if error is BADTIME; otherwise 0
-    other_data = None        # use time_signed + fudge + 1 for BADTIME
-    dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
-
-    # TSIG has some special defaults
-    def __init__(self):
-        super().__init__()
-        self.rr_class = 'ANY'
-        self.rr_ttl = 0
-
-    def dump(self, f):
-        if str(self.algorithm) == 'hmac-md5':
-            name_wire = encode_name('hmac-md5.sig-alg.reg.int')
-        else:
-            name_wire = encode_name(self.algorithm)
-        mac_size = self.mac_size
-        if mac_size is None:
-            if self.algorithm in self.dict_macsize.keys():
-                mac_size = self.dict_macsize[self.algorithm]
-            else:
-                raise RuntimeError('TSIG Mac Size cannot be determined')
-        mac = encode_string('x' * mac_size) if self.mac is None else \
-            encode_string(self.mac, mac_size)
-        other_len = self.other_len
-        if other_len is None:
-            # 18 = BADTIME
-            other_len = 6 if self.error == 18 else 0
-        other_data = self.other_data
-        if other_data is None:
-            other_data = '%012x' % (self.time_signed + self.fudge + 1) \
-                if self.error == 18 else ''
-        else:
-            other_data = encode_string(self.other_data, other_len)
-        if self.rdlen is None:
-            self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
-                                 len(other_data) / 2)
-        self.dump_header(f, self.rdlen)
-        f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
-                (self.algorithm, self.time_signed, self.fudge))
-        f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
-        f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
-        f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
-        f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
-        f.write('%04x %04x\n' %  (self.original_id, self.error))
-        f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
-        f.write('%04x%s\n' % (other_len,
-                              ' ' + other_data if len(other_data) > 0 else ''))
-
-def get_config_param(section):
-    config_param = {'name' : (Name, {}),
-                    'header' : (DNSHeader, header_xtables),
-                    'question' : (DNSQuestion, question_xtables),
-                    'edns' : (EDNS, {}), 'a' : (A, {}), 'ns' : (NS, {}),
-                    'soa' : (SOA, {}), 'txt' : (TXT, {}),
-                    'rp' : (RP, {}), 'rrsig' : (RRSIG, {}),
-                    'nsec' : (NSEC, {}), 'nsec3' : (NSEC3, {}),
-                    'tsig' : (TSIG, {}) }
-    s = section
-    m = re.match('^([^:]+)/\d+$', section)
-    if m:
-        s = m.group(1)
-    return config_param[s]
-
-usage = '''usage: %prog [options] input_file'''
-
-if __name__ == "__main__":
-    parser = OptionParser(usage=usage)
-    parser.add_option('-o', '--output', action='store', dest='output',
-                      default=None, metavar='FILE',
-                      help='output file name [default: prefix of input_file]')
-    (options, args) = parser.parse_args()
-
-    if len(args) == 0:
-        parser.error('input file is missing')
-    configfile = args[0]
-
-    outputfile = options.output
-    if not outputfile:
-        m = re.match('(.*)\.[^.]+$', configfile)
-        if m:
-            outputfile = m.group(1)
-        else:
-            raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
-
-    config = configparser.SafeConfigParser()
-    config.read(configfile)
-
-    output = open(outputfile, 'w')
-
-    print_header(output, configfile)
-
-    # First try the 'custom' mode; if it fails assume the standard mode.
-    try:
-        sections = config.get('custom', 'sections').split(':')
-    except configparser.NoSectionError:
-        sections = ['header', 'question', 'edns']
-
-    for s in sections:
-        section_param = get_config_param(s)
-        (obj, xtables) = (section_param[0](), section_param[1])
-        if get_config(config, s, obj, xtables):
-            obj.dump(output)
-
-    output.close()
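
The removed generator classes above are driven by sections in a spec file
(see the documentation in gen_wiredata.py.in later in this patch).  As a
purely illustrative example, the RP renderer could be selected and
configured with a spec like the following (the mailbox and text values are
made up for the example):

  [custom]
  sections: rp
  [rp]
  mailbox: admin.example.com
  text: contact.example.com
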
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index b391c1e..d94100b 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = datasrc cc config log net notify util testutils acl
+SUBDIRS = datasrc cc config log net notify util testutils acl bind10
 
 python_PYTHON = __init__.py
 
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
new file mode 100644
index 0000000..43a7605
--- /dev/null
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -0,0 +1,4 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py sockcreator.py
+pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/__init__.py b/src/lib/python/isc/bind10/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
new file mode 100644
index 0000000..9fcc74e
--- /dev/null
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+import os
+import subprocess
+import isc.log
+from bind10_messages import *
+from libutil_io_python import recv_fd
+
+logger = isc.log.Logger("boss")
+
+"""
+Module that communicates with the privileged socket creator (b10-sockcreator).
+"""
+
+class CreatorError(Exception):
+    """
+    Exception for socket creator related errors.
+
+    It has two members, fatal and errno, which simply hold the values
+    passed to the __init__ function.
+    """
+
+    def __init__(self, message, fatal, errno=None):
+        """
+        Creates the exception. The message argument is the usual string.
+        The fatal argument tells whether the error is fatal (e.g. the
+        creator crashed) and errno is the errno value returned from the
+        socket creator, if applicable.
+        """
+        Exception.__init__(self, message)
+        self.fatal = fatal
+        self.errno = errno
+
+class Parser:
+    """
+    This class knows the sockcreator language. It creates commands, sends
+    them, and receives and parses the answers.
+
+    It does not start the creator process itself; the communication
+    channel must be provided.
+
+    In theory, anything here can throw a fatal CreatorError exception, but
+    that happens only if, for example, the creator process crashes. Any
+    other error conditions are mentioned explicitly.
+    """
+
+    def __init__(self, creator_socket):
+        """
+        Creates the parser. The creator_socket is the socket to the socket
+        creator process that will be used for communication. However, the
+        object must have a read_fd() method to read the file descriptor.
+        This slightly unusual trick of modifying an object is used to make
+        testing easier.
+
+        You can use WrappedSocket in production code to add the method to any
+        ordinary socket.
+        """
+        self.__socket = creator_socket
+        logger.info(BIND10_SOCKCREATOR_INIT)
+
+    def terminate(self):
+        """
+        Asks the creator process to terminate and waits for it to close the
+        socket. Does not return anything. Raises a CreatorError if there is
+        still data on the socket, if there is an error closing the socket,
+        or if the socket had already been closed.
+        """
+        if self.__socket is None:
+            raise CreatorError('Terminated already', True)
+        logger.info(BIND10_SOCKCREATOR_TERMINATE)
+        try:
+            self.__socket.sendall(b'T')
+            # Wait for an EOF - it will return empty data
+            eof = self.__socket.recv(1)
+            if len(eof) != 0:
+                raise CreatorError('Protocol error - data after terminated',
+                                   True)
+            self.__socket = None
+        except socket.error as se:
+            self.__socket = None
+            raise CreatorError(str(se), True)
+
+    def get_socket(self, address, port, socktype):
+        """
+        Asks the socket creator process to create a socket. Pass an address
+        (an isc.net.addr.IPAddr object), a port number and a socket type
+        (either the string 'UDP' or 'TCP', or the constant
+        socket.SOCK_DGRAM or socket.SOCK_STREAM).
+
+        Blocks until the socket is provided by the socket creator process
+        (which should be fast, as it runs on localhost) and returns the
+        file descriptor number. It raises a CreatorError exception if the
+        creation fails.
+        """
+        if self.__socket is None:
+            raise CreatorError('Socket requested on terminated creator', True)
+        # First, assemble the request from parts
+        logger.info(BIND10_SOCKET_GET, address, port, socktype)
+        data = b'S'
+        if socktype == 'UDP' or socktype == socket.SOCK_DGRAM:
+            data += b'U'
+        elif socktype == 'TCP' or socktype == socket.SOCK_STREAM:
+            data += b'T'
+        else:
+            raise ValueError('Unknown socket type: ' + str(socktype))
+        if address.family == socket.AF_INET:
+            data += b'4'
+        elif address.family == socket.AF_INET6:
+            data += b'6'
+        else:
+            raise ValueError('Unknown address family in address')
+        data += struct.pack('!H', port)
+        data += address.addr
+        try:
+            # Send the request
+            self.__socket.sendall(data)
+            answer = self.__socket.recv(1)
+            if answer == b'S':
+                # Success!
+                result = self.__socket.read_fd()
+                logger.info(BIND10_SOCKET_CREATED, result)
+                return result
+            elif answer == b'E':
+                # There was an error, read the error as well
+                error = self.__socket.recv(1)
+                errno = struct.unpack('i',
+                                      self.__read_all(len(struct.pack('i',
+                                                                      0))))
+                if error == b'S':
+                    cause = 'socket'
+                elif error == b'B':
+                    cause = 'bind'
+                else:
+                    self.__socket = None
+                    logger.fatal(BIND10_SOCKCREATOR_BAD_CAUSE, error)
+                    raise CreatorError('Unknown error cause: ' + str(error),
+                                       True)
+                logger.error(BIND10_SOCKET_ERROR, cause, errno[0],
+                             os.strerror(errno[0]))
+                raise CreatorError('Error creating socket on ' + cause, False,
+                                   errno[0])
+            else:
+                self.__socket = None
+                logger.fatal(BIND10_SOCKCREATOR_BAD_RESPONSE, answer)
+                raise CreatorError('Unknown response ' + str(answer), True)
+        except socket.error as se:
+            self.__socket = None
+            logger.fatal(BIND10_SOCKCREATOR_TRANSPORT_ERROR, str(se))
+            raise CreatorError(str(se), True)
+
+    def __read_all(self, length):
+        """
+        Keeps reading until length data is read or EOF or error happens.
+
+        EOF is considered error as well and throws a CreatorError.
+        """
+        result = b''
+        while len(result) < length:
+            data = self.__socket.recv(length - len(result))
+            if len(data) == 0:
+                self.__socket = None
+                logger.fatal(BIND10_SOCKCREATOR_EOF)
+                raise CreatorError('Unexpected EOF', True)
+            result += data
+        return result
+
+class WrappedSocket:
+    """
+    This class wraps a socket and adds a read_fd method, so it can be used
+    by the Parser class conveniently. It simply copies all the wrapped
+    socket's attributes into itself and implements the method.
+    """
+    def __init__(self, socket):
+        # Copy whatever can be copied from the socket
+        for name in dir(socket):
+            if name not in ['__class__', '__weakref__']:
+                setattr(self, name, getattr(socket, name))
+        # Keep the socket, so we can prevent it from being garbage-collected
+        # and closed before this wrapper itself is removed
+        self.__orig_socket = socket
+
+    def read_fd(self):
+        """
+        Read the file descriptor from the socket.
+        """
+        return recv_fd(self.fileno())
+
+# FIXME: Any idea how to test this? Starting an external process doesn't sound
+# OK
+class Creator(Parser):
+    """
+    This starts the socket creator and allows asking for the sockets.
+    """
+    def __init__(self, path):
+        (local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
+        # Popen does not like, for some reason, having the same socket for
+        # stdin as well as stdout, so we dup it before passing it there.
+        remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
+                                socket.SOCK_STREAM)
+        env = os.environ
+        env['PATH'] = path
+        self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
+                                          stdin=remote.fileno(),
+                                          stdout=remote2.fileno())
+        remote.close()
+        remote2.close()
+        Parser.__init__(self, WrappedSocket(local))
+
+    def pid(self):
+        return self.__process.pid
+
+    def kill(self):
+        logger.warn(BIND10_SOCKCREATOR_KILL)
+        if self.__process is not None:
+            self.__process.kill()
+            self.__process = None
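
For orientation, the following sketch shows how a caller might use this
module; the path and address below are made up for the example, and real
callers (such as the boss process) may of course differ:

    from isc.net.addr import IPAddr
    from isc.bind10.sockcreator import Creator

    # The argument becomes the PATH used to find the b10-sockcreator binary.
    creator = Creator('/path/to/dir/containing/b10-sockcreator')
    # Ask for a UDP socket bound to 192.0.2.1 port 53; a file descriptor
    # number is returned (or CreatorError is raised).
    fd = creator.get_socket(IPAddr('192.0.2.1'), 53, 'UDP')
    # ... hand the descriptor over to whatever component needs it ...
    creator.terminate()
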
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
new file mode 100644
index 0000000..f498b86
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -0,0 +1,29 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+#PYTESTS = args_test.py bind10_test.py
+# NOTE: this has a generated test found in the builddir
+PYTESTS = sockcreator_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+	touch $(abs_top_srcdir)/.coverage 
+	rm -f .coverage
+	${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+	for pytest in $(PYTESTS) ; do \
+	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
+	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+		$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+	done
+
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
new file mode 100644
index 0000000..4453184
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -0,0 +1,327 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This test file is generated from .py.in to .py just so it is in the build
+# dir, the same as the rest of the tests. That saves a lot of boilerplate in
+# the makefile.
+
+"""
+Tests for the bind10.sockcreator module.
+"""
+
+import unittest
+import struct
+import socket
+from isc.net.addr import IPAddr
+import isc.log
+from libutil_io_python import send_fd
+from isc.bind10.sockcreator import Parser, CreatorError, WrappedSocket
+
+class FakeCreator:
+    """
+    Class emulating the socket to the socket creator. It can be given expected
+    data to receive (and check) and responses to give to the Parser class
+    during testing.
+    """
+
+    class InvalidPlan(Exception):
+        """
+        Raised when someone wants to recv when sending is planned or vice
+        versa.
+        """
+        pass
+
+    class InvalidData(Exception):
+        """
+        Raised when the data passed to sendall are not what was expected.
+        """
+        pass
+
+    def __init__(self, plan):
+        """
+        Create the object. The plan variable contains a list of expected
+        actions, in the form:
+
+        [('r', 'Data to return from recv'), ('s', 'Data expected on sendall'),
+         ('f', 'File descriptor number to return from read_fd'), ('e',
+         None), ...]
+
+        It modifies the array as it goes.
+        """
+        self.__plan = plan
+
+    def __get_plan(self, expected):
+        if len(self.__plan) == 0:
+            raise self.InvalidPlan('Nothing more planned')
+        (kind, data) = self.__plan[0]
+        if kind == 'e':
+            self.__plan.pop(0)
+            raise socket.error('False socket error')
+        if kind != expected:
+            raise self.InvalidPlan('Planned ' + kind + ', but ' + expected +
+                ' requested')
+        return data
+
+    def recv(self, maxsize):
+        """
+        Emulate recv. Returns up to maxsize bytes from the current recv
+        plan. If there are data left over from a previous recv call, they
+        are used first.
+
+        If no recv is planned, raises InvalidPlan.
+        """
+        data = self.__get_plan('r')
+        result, rest = data[:maxsize], data[maxsize:]
+        if len(rest) > 0:
+            self.__plan[0] = ('r', rest)
+        else:
+            self.__plan.pop(0)
+        return result
+
+    def read_fd(self):
+        """
+        Emulate reading a file descriptor. Returns the one from the plan.
+
+        It raises InvalidPlan if no file descriptor is planned now.
+        """
+        fd = self.__get_plan('f')
+        self.__plan.pop(0)
+        return fd
+
+    def sendall(self, data):
+        """
+        Checks that the data passed are correct according to the plan. It
+        raises InvalidData if the data differ or InvalidPlan when sendall is
+        not expected.
+        """
+        planned = self.__get_plan('s')
+        dlen = len(data)
+        prefix, rest = planned[:dlen], planned[dlen:]
+        if prefix != data:
+            raise self.InvalidData('Expected "' + str(prefix) + '", got "' +
+                str(data) + '"')
+        if len(rest) > 0:
+            self.__plan[0] = ('s', rest)
+        else:
+            self.__plan.pop(0)
+
+    def all_used(self):
+        """
+        Returns True if the whole plan has been consumed.
+        """
+        return len(self.__plan) == 0
+
+class ParserTests(unittest.TestCase):
+    """
+    Testcases for the Parser class.
+
+    A lot of these tests could be done with
+    `with self.assertRaises(CreatorError) as cm`, but some versions of
+    Python handle the scope incorrectly and it does not work, so we use the
+    more primitive try-except approach.
+    """
+    def __terminate(self):
+        creator = FakeCreator([('s', b'T'), ('r', b'')])
+        parser = Parser(creator)
+        self.assertEqual(None, parser.terminate())
+        self.assertTrue(creator.all_used())
+        return parser
+
+    def test_terminate(self):
+        """
+        Test that the terminate command is correct and that it waits for
+        the EOF.
+        """
+        self.__terminate()
+
+    def __terminate_raises(self, parser):
+        """
+        Check that terminate() raises a fatal exception.
+        """
+        try:
+            parser.terminate()
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(ce.fatal)
+            self.assertEqual(None, ce.errno)
+
+    def test_terminate_error1(self):
+        """
+        Test that it reports an exception when there is an error terminating
+        the creator. This one raises an error when receiving the EOF.
+        """
+        creator = FakeCreator([('s', b'T'), ('e', None)])
+        parser = Parser(creator)
+        self.__terminate_raises(parser)
+
+    def test_terminate_error2(self):
+        """
+        Test that it reports an exception when there is an error terminating
+        the creator. This one raises an error when sending data.
+        """
+        creator = FakeCreator([('e', None)])
+        parser = Parser(creator)
+        self.__terminate_raises(parser)
+
+    def test_terminate_error3(self):
+        """
+        Test that it reports an exception when there is an error terminating
+        the creator. This one sends data when it should have terminated.
+        """
+        creator = FakeCreator([('s', b'T'), ('r', b'Extra data')])
+        parser = Parser(creator)
+        self.__terminate_raises(parser)
+
+    def test_terminate_twice(self):
+        """
+        Test we can't terminate twice.
+        """
+        parser = self.__terminate()
+        self.__terminate_raises(parser)
+
+    def test_crash(self):
+        """
+        Tests that the parser correctly raises an exception when the creator
+        crashes unexpectedly.
+        """
+        creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'')])
+        parser = Parser(creator)
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(creator.all_used())
+            # Is the exception correct?
+            self.assertTrue(ce.fatal)
+            self.assertEqual(None, ce.errno)
+
+    def test_error(self):
+        """
+        Tests that the parser correctly raises a non-fatal exception when
+        the socket cannot be created.
+        """
+        # We split the int to see if it can cope with data coming in
+        # different packets
+        intpart = struct.pack('@i', 42)
+        creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'ES' +
+            intpart[:1]), ('r', intpart[1:])])
+        parser = Parser(creator)
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(creator.all_used())
+            # Is the exception correct?
+            self.assertFalse(ce.fatal)
+            self.assertEqual(42, ce.errno)
+
+    def __error(self, plan):
+        creator = FakeCreator(plan)
+        parser = Parser(creator)
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, socket.SOCK_DGRAM)
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(creator.all_used())
+            self.assertTrue(ce.fatal)
+
+    def test_error_send(self):
+        self.__error([('e', None)])
+
+    def test_error_recv(self):
+        self.__error([('s', b'SU4\0\0\0\0\0\0'), ('e', None)])
+
+    def test_error_read_fd(self):
+        self.__error([('s', b'SU4\0\0\0\0\0\0'), ('r', b'S'), ('e', None)])
+
+    def __create(self, addr, socktype, encoded):
+        creator = FakeCreator([('s', b'S' + encoded), ('r', b'S'), ('f', 42)])
+        parser = Parser(creator)
+        self.assertEqual(42, parser.get_socket(IPAddr(addr), 42, socktype))
+
+    def test_create1(self):
+        self.__create('192.0.2.0', 'UDP', b'U4\0\x2A\xC0\0\x02\0')
+
+    def test_create2(self):
+        self.__create('2001:db8::', socket.SOCK_STREAM,
+            b'T6\0\x2A\x20\x01\x0d\xb8\0\0\0\0\0\0\0\0\0\0\0\0')
+
+    def test_create_terminated(self):
+        """
+        Test that sockets can't be requested after the parser is terminated.
+        """
+        parser = self.__terminate()
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(ce.fatal)
+            self.assertEqual(None, ce.errno)
+
+    def test_invalid_socktype(self):
+        """
+        Test that an invalid socket type is rejected.
+        """
+        self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+                          IPAddr('0.0.0.0'), 42, 'RAW')
+
+    def test_invalid_family(self):
+        """
+        Test that it rejects an invalid address family.
+        """
+        # Note: this produces bad logger output, since this address cannot
+        # be converted to a string, so the original message with
+        # placeholders is output. This should not happen in practice, so
+        # it is harmless.
+        addr = IPAddr('0.0.0.0')
+        addr.family = 42
+        self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+                          addr, 42, socket.SOCK_DGRAM)
+
+class WrapTests(unittest.TestCase):
+    """
+    Tests for the WrappedSocket class.
+    """
+    def test_wrap(self):
+        # We construct two pairs of sockets. The receiving side of one pair
+        # will be wrapped. Then we send one socket of the other pair through
+        # this pair and check that the received one can be used as a socket.
+
+        # The transport socket
+        (t1, t2) = socket.socketpair()
+        # The payload socket
+        (p1, p2) = socket.socketpair()
+
+        t2 = WrappedSocket(t2)
+
+        # Transfer the descriptor
+        send_fd(t1.fileno(), p1.fileno())
+        p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
+
+        # Now, pass some data through the socket
+        p1.send(b'A')
+        data = p2.recv(1)
+        self.assertEqual(b'A', data)
+
+        # Test the wrapping didn't hurt the socket's usual methods
+        t1.send(b'B')
+        data = t2.recv(1)
+        self.assertEqual(b'B', data)
+        t2.send(b'C')
+        data = t1.recv(1)
+        self.assertEqual(b'C', data)
+
+if __name__ == '__main__':
+    isc.log.init("bind10") # FIXME Should this be needed?
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()
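
To illustrate how a FakeCreator plan maps onto the protocol implemented by
Parser.get_socket() above, here is a sketch of a plan for a successful
TCP/IPv4 request (port 53 on 192.0.2.1; the descriptor value 7 is chosen
arbitrarily for the example):

    plan = [('s', b'ST4\x00\x35\xc0\x00\x02\x01'),  # request the parser sends
            ('r', b'S'),                            # creator reports success
            ('f', 7)]                               # fd returned via read_fd()
    parser = Parser(FakeCreator(plan))
    assert parser.get_socket(IPAddr('192.0.2.1'), 53, 'TCP') == 7
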
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 461541e..ba7724c 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -426,7 +426,7 @@ class UIModuleCCSession(MultiConfigData):
             raise ModuleCCSessionError("Bad config version")
         self._set_current_config(config)
 
-    def _add_value_to_list(self, identifier, value):
+    def _add_value_to_list(self, identifier, value, module_spec):
         cur_list, status = self.get_value(identifier)
         if not cur_list:
             cur_list = []
@@ -492,7 +492,7 @@ class UIModuleCCSession(MultiConfigData):
                 if set_value_str is not None:
                     value_str += set_value_str
                 value = isc.cc.data.parse_value_str(value_str)
-            self._add_value_to_list(identifier, value)
+            self._add_value_to_list(identifier, value, module_spec)
         elif 'named_set_item_spec' in module_spec:
             item_name = None
             item_value = None
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index c820ad9..351c8e6 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -745,6 +745,8 @@ class TestUIModuleCCSession(unittest.TestCase):
                           uccs.remove_value, "Spec2/item5[123]", None)
         uccs.remove_value("Spec2/item5[0]", None)
         self.assertEqual({'Spec2': {'item5': []}}, uccs._local_changes)
+        uccs.add_value("Spec2/item5", None);
+        self.assertEqual({'Spec2': {'item5': ['']}}, uccs._local_changes)
 
     def test_add_remove_value_named_set(self):
         fake_conn = fakeUIConn()
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
index f9de744..570f51e 100644
--- a/src/lib/python/isc/notify/notify_out_messages.mes
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -78,6 +78,6 @@ message, either in the message parser, or while trying to extract data
 from the parsed message. The error is printed, and notify_out will
 treat the response as a bad message, but this does point to a
 programming error, since all exceptions should have been caught
-explicitely. Please file a bug report. Since there was a response,
+explicitly. Please file a bug report. Since there was a response,
 no more notifies will be sent to this server for this notification
 event.
diff --git a/src/lib/resolve/tests/Makefile.am b/src/lib/resolve/tests/Makefile.am
index ee311a6..cf05d9b 100644
--- a/src/lib/resolve/tests/Makefile.am
+++ b/src/lib/resolve/tests/Makefile.am
@@ -31,6 +31,7 @@ run_unittests_LDADD +=  $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/asiodns/libasiodns.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD +=  $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/testutils/testdata/Makefile.am b/src/lib/testutils/testdata/Makefile.am
index 93b9eb9..918d5c5 100644
--- a/src/lib/testutils/testdata/Makefile.am
+++ b/src/lib/testutils/testdata/Makefile.am
@@ -32,4 +32,4 @@ EXTRA_DIST += test2.zone.in
 EXTRA_DIST += test2-new.zone.in
 
 .spec.wire:
-	$(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+	$(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/util/Makefile.am b/src/lib/util/Makefile.am
index 3db9ac4..0b78b29 100644
--- a/src/lib/util/Makefile.am
+++ b/src/lib/util/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = . io unittests tests pyunittests
+SUBDIRS = . io unittests tests pyunittests python
 
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
diff --git a/src/lib/util/python/Makefile.am b/src/lib/util/python/Makefile.am
new file mode 100644
index 0000000..81d528c
--- /dev/null
+++ b/src/lib/util/python/Makefile.am
@@ -0,0 +1 @@
+noinst_SCRIPTS = gen_wiredata.py mkpywrapper.py
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
new file mode 100755
index 0000000..8e1f079
--- /dev/null
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -0,0 +1,1189 @@
+#!@PYTHON@
+
+# Copyright (C) 2010  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Generator of various types of DNS data in the hex format.
+
+This script reads a human readable specification file (called "spec
+file" hereafter) that defines some type of DNS data (an RDATA, an RR,
+or a complete message) and dumps the defined data to a separate file
+as a "wire format" sequence parsable by the
+UnitTestUtil::readWireData() function (currently defined as part of
+libdns++ tests).  Many DNS related tests involve wire format test
+data, so it will be convenient if we can define the data in a more
+intuitive way than writing the entire hex sequence by hand.
+
+Here is a simple example.  Consider the following spec file:
+
+  [custom]
+  sections: a
+  [a]
+  as_rr: True
+
+When the script reads this file, it detects that the file specifies a
+single component (called a "section" here) consisting of a single A RDATA,
+which must be dumped as an RR (not just the RDATA part).  It then
+dumps the following content:
+
+  # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=4)
+  076578616d706c6503636f6d00 0001 0001 00015180 0004
+  # Address=192.0.2.1
+  c0000201
+
+As can be seen, the script automatically completes all variable
+parameters of RRs: owner name, class, TTL, RDATA length and data.  For
+testing purposes many of these share the same common values (like
+"example.com" or 192.0.2.1), so it is convenient if we only have
+to specify non-default parameters.  To change the RDATA (i.e., the
+IPv4 address), we should add the following line at the end of the spec
+file:
+
+  address: 192.0.2.2
+
+Then the last two lines of the output file will be as follows:
+
+  # Address=192.0.2.2
+  c0000202
+
+In some cases we would rather specify malformed data for tests.  This
+script has the ability to specify broken parameters for many types of
+data.  For example, we can generate data that would look like an A RR
+but the RDLEN is 3 by adding the following line to the spec file:
+
+  rdlen: 3
+
+Then the first two lines of the output file will be as follows:
+
+  # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=3)
+  076578616d706c6503636f6d00 0001 0001 00015180 0003
+
+** USAGE **
+
+  gen_wiredata.py [-o output_file] spec_file
+
+If the -o option is missing, and if the spec_file has a suffix (such as
+in the form of "data.spec"), the output file name will be the prefix
+part of it (as in "data"); if -o is missing and the spec_file does not
+have a suffix, the script will fail.
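+
+For example (the file names here are made up for illustration):
+
+  gen_wiredata.py example1.spec                  # output goes to "example1"
+  gen_wiredata.py -o example1.wire example1.spec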
+
+** SPEC FILE SYNTAX **
+
+A spec file accepted in this script should be in the form of a
+configuration file that is parsable by Python's standard
+configparser module.  In short, it consists of sections; each section
+is identified in the form of [section_name] followed by "name: value"
+entries.  Lines beginning with # or ; will be treated as comments.
+Refer to the configparser module documentation for further details of
+the general syntax.
+
+This script has two major modes: the custom mode and the DNS query
+mode.  The former generates an arbitrary combination of DNS message
+header, question section, RDATAs or RRs.  It is mainly intended to
+generate test data for a single type of RDATA or RR, or for
+complicated complete DNS messages.  The DNS query mode is actually a
+special case of the custom mode, which is a shortcut to generate a
+simple DNS query message (with or without EDNS).
+
+* Custom mode syntax *
+
+By default this script assumes the DNS query mode.  To specify the
+custom mode, there must be a special "custom" section in the spec
+file, which should contain a 'sections' entry.  The value of this entry
+is a colon-separated list of string fields, each of which is either
+"header", "question", "edns", "name", or a string specifying an RR
+type.  For RR types the string is the lower-cased mnemonic that
+identifies the type: 'a' for type A, 'ns' for type NS, and so on
+(note: in the current implementation it is case sensitive and must be
+lower cased).
+
+Each of these fields is interpreted as a section name of the spec
+(configuration), and in that section parameters specific to the
+semantics of the field can be configured.
+
+A "header" section specifies the content of a DNS message header.
+See the documentation of the DNSHeader class of this module for
+configurable parameters.
+
+A "question" section specifies the content of a single question that
+is normally to be placed in the Question section of a DNS message.
+See the documentation of the DNSQuestion class of this module for
+configurable parameters.
+
+An "edns" section specifies the content of an EDNS OPT RR.  See the
+documentation of the EDNS class of this module for configurable
+parameters.
+
+A "name" section specifies a domain name with or without compression.
+This is specifically intended to be used for testing name related
+functionalities and would rarely be used with other sections.  See the
+documentation of the Name class of this module for configurable
+parameters.
+
+In a specific section for an RR or RDATA, possible entries depend on
+the type.  But there are some common configurable entries.  See the
+description of the RR class.  The most important one would be "as_rr".
+It controls whether the entry should be treated as an RR (with name,
+type, class and TTL) or only as an RDATA.  By default as_rr is
+"False", so if an entry is to be intepreted as an RR, an as_rr entry
+must be explicitly specified with a value of "True".
+
+Another common entry is "rdlen".  It specifies the RDLEN field value
+of the RR (note: this is included when the entry is interpreted as
+RDATA, too).  By default this value is automatically determined from the
+RR type and (if it has a variable length) from the other RDATA fields,
+but, as shown in the example above, it can be set explicitly, possibly to
+a bogus value for testing against invalid data.
+
+For type specific entries (and their defaults when provided), see the
+documentation of the corresponding Python class defined in this
+module.  In general, for each supported type there should be a class
+named after the mnemonic of the corresponding RR type, and it is a
+subclass of the RR class.  For example, the "NS" class is defined for
+RR type NS.
+
+Look again at the A RR example shown at the beginning of this
+description.  There's a "custom" section, which consists of a
+"sections" entry whose value is a single "a", which means the data to
+be generated is an A RR or RDATA.  There's a corresponding "a"
+section, which only specifies that it should be interpreted as an RR
+(all field values of the RR are derived from the default).
+
+If you want to generate a data sequence for two or more RRs or
+RDATAs, you can specify them in the form of colon-separated fields for
+the "sections" entry.  For example, to generate a sequence of A and NS
+RRs in that order, the "custom" section would be something like this:
+
+  [custom]
+  sections: a:ns
+
+and there must be an "ns" section in addtion to "a".
+
+If a sequence of two or more RRs/RDATAs of the same RR type should be
+generated, these should be uniquely indexed with the "/" separator.
+For example, to generate two A RRs, the "custom" section would be as
+follows:
+
+  [custom]
+  sections: a/1:a/2
+
+and there must be "a/1" and "a/2" sections.
+
+Another practical example that would be used for many tests is to
+generate data for a complete DNS response message.  The spec file of
+such an example configuration would look as follows:
+
+  [custom]
+  sections: header:question:a
+  [header]
+  qr: 1
+  ancount: 1
+  [question]
+  [a]
+  as_rr: True
+
+With this configuration, this script will generate test data for a DNS
+response to a query for example.com/IN/A containing one corresponding
+A RR in the answer section.
+
+* DNS query mode syntax *
+
+If the spec file does not contain a "custom" section (that has a
+"sections" entry), this script assumes the DNS query mode.  This mode
+is actually a special case of custom mode; it implicitly assumes the
+"sections" entry whose value is "header:question:edns".
+
+In this mode it is expected that the spec file also contains at least
+a "header" and "question" sections, and optionally an "edns" section.
+But the script does not warn or fail even if the expected sections are
+missing.
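+
+For instance, the following spec file (with values chosen only for
+illustration) would be handled in the DNS query mode:
+
+  [header]
+  rd: 1
+  [question]
+  rrtype: AAAA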
+
+* Entry value types *
+
+As described above, a section of the spec file accepts entries
+specific to the semantics of the section.  They generally correspond
+to DNS message or RR fields.
+
+Many of them are expected to be integral values, for which either decimal or
+hexadecimal representation is accepted, for example:
+
+  rr_ttl: 3600
+  tag: 0x1234
+
+Some others are expected to be strings.  A string value does not have
+to be quoted:
+
+  address: 192.0.2.2
+
+but can also be quoted with single quotes:
+
+  address: '192.0.2.2'
+
+Note 1: a string that can be interpreted as an integer must be quoted.
+For example, if you want to set a "string" entry to "3600", it should
+be:
+
+  string: '3600'
+
+instead of
+
+  string: 3600
+
+Note 2: a string enclosed with double quotes is not accepted:
+
+  # This doesn't work:
+  address: "192.0.2.2"
+
+In general, string values are converted to hexadecimal sequences
+according to the semantics of the entry.  For instance, a textual IPv4
+address in the above example will be converted to a hexadecimal
+sequence corresponding to a 4-byte integer.  So, in many cases, the
+acceptable syntax for a particular string entry value should be
+obvious from the context.  There are still some exceptional cases
+especially for complicated RR field values, for which the
+corresponding class documentation should be referenced.
+
+One special string syntax that would be worth noting is domain names,
+which would naturally be used in many kinds of entries.  The simplest
+form of acceptable syntax is a textual representation of domain names
+such as "example.com" (note: names are always assumed to be
+"absolute", so the trailing dot can be omitted).  But a domain name in
+the wire format can also contain a compression pointer.  This script
+provides a simple support for name compression with a special notation
+of "ptr=nn" where nn is the numeric pointer value (decimal).  For example,
+if the NSDNAME field of an NS RDATA is specified as follows:
+
+  nsname: ns.ptr=12
+
+this script will generate the following output:
+
+  # NS name=ns.ptr=12
+  026e73c00c
+
+** EXTEND THE SCRIPT **
+
+This script is expected to be extended as we add more support for
+various types of RR.  It is encouraged to add support for a new type
+of RR to this script as we see the need for testing that type.  Here
+is a simple instruction of how to do that.
+
+Assume you are adding support for "FOO" RR.  Also assume that the FOO
+RDATA contains a single field named "value".
+
+What you are expected to do is as follows:
+
+- Define a new class named "FOO" inherited from the RR class.  Also
+  define a class variable named "value" for the FOO RDATA field (the
+  variable name can be different from the field name, but it is
+  convenient if it is easily identifiable) with an appropriate
+  default value (if possible):
+
+    class FOO(RR):
+        value = 10
+
+  The name of the variable will be (automatically) used as the
+  corresponding entry name in the spec file.  So, a spec file that
+  sets this field to 20 would look like this:
+
+    [foo]
+    value: 20
+
+- Define the "dump()" method for class FOO.  It must call
+  self.dump_header() (which is derived from class RR) at the
+  beginning.  It then prints the RDATA field values in an appropriate
+  way.  Assuming the value is a 16-bit integer field, a complete
+  dump() method would look like this:
+
+    def dump(self, f):
+        if self.rdlen is None:
+            self.rdlen = 2
+        self.dump_header(f, self.rdlen)
+        f.write('# Value=%d\\n' % (self.value))
+        f.write('%04x\\n' % (self.value))
+
+  The first f.write() call is not mandatory, but is encouraged to
+  be provided so that the generated files will be more human readable.
+  Depending on the complexity of the RDATA fields, the dump()
+  implementation would be more complicated.  In particular, if the
+  RDATA length is variable and the RDLEN field value is not specified
+  in the spec file, the dump() method is normally expected to
+  calculate the correct length and pass it to dump_header().  See the
+  implementation of various derived classes of class RR for actual
+  examples.
+"""
+
+import configparser, re, time, socket, sys
+from datetime import datetime
+from optparse import OptionParser
+
+re_hex = re.compile(r'^0x[0-9a-fA-F]+')
+re_decimal = re.compile(r'^\d+$')
+re_string = re.compile(r"\'(.*)\'$")
+
+dnssec_timefmt = '%Y%m%d%H%M%S'
+
+dict_qr = { 'query' : 0, 'response' : 1 }
+dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
+                'update' : 5 }
+rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
+dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
+               'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
+               'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
+rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
+dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
+                'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
+                'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
+                'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
+                'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
+                'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
+                'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
+                'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
+                'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
+                'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
+                'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
+                'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
+                'maila' : 254, 'any' : 255 }
+rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
+dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
+rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
+                          dict_rrclass.keys()])
+dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
+                   'rsasha1' : 5 }
+dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
+rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
+                            dict_algorithm.keys()])
+rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
+                                  dict_nsec3_algorithm.keys()])
+
+header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
+                   'rcode' : dict_rcode }
+question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
+
+def parse_value(value, xtable = {}):
+    if re.search(re_hex, value):
+        return int(value, 16)
+    if re.search(re_decimal, value):
+        return int(value)
+    m = re.match(re_string, value)
+    if m:
+        return m.group(1)
+    lovalue = value.lower()
+    if lovalue in xtable:
+        return xtable[lovalue]
+    return value
+
+def code_totext(code, dict):
+    if code in dict.keys():
+        return dict[code] + '(' + str(code) + ')'
+    return str(code)
+
+def encode_name(name, absolute=True):
+    # make sure the name is dot-terminated.  duplicate dots will be ignored
+    # below.
+    name += '.'
+    labels = name.split('.')
+    wire = ''
+    for l in labels:
+        if len(l) > 4 and l[0:4] == 'ptr=':
+            # special meta-syntax for compression pointer
+            wire += '%04x' % (0xc000 | int(l[4:]))
+            break
+        if absolute or len(l) > 0:
+            wire += '%02x' % len(l)
+            wire += ''.join(['%02x' % ord(ch) for ch in l])
+        if len(l) == 0:
+            break
+    return wire
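+
+# Illustrative examples only (not used by the script itself):
+#   encode_name('ns.example') returns '026e73076578616d706c6500'
+#   encode_name('ns.ptr=12')  returns '026e73c00c' (see the module docstring)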
+
+def encode_string(name, len=None):
+    if type(name) is int and len is not None:
+        return '%0.*x' % (len * 2, name)
+    return ''.join(['%02x' % ord(ch) for ch in name])
+
+def count_namelabels(name):
+    if name == '.':             # special case
+        return 0
+    m = re.match('^(.*)\.$', name)
+    if m:
+        name = m.group(1)
+    return len(name.split('.'))
+
+def get_config(config, section, configobj, xtables = {}):
+    try:
+        for field in config.options(section):
+            value = config.get(section, field)
+            if field in xtables.keys():
+                xtable = xtables[field]
+            else:
+                xtable = {}
+            configobj.__dict__[field] = parse_value(value, xtable)
+    except configparser.NoSectionError:
+        return False
+    return True
+
+def print_header(f, input_file):
+    f.write('''###
+### This data file was auto-generated from ''' + input_file + '''
+###
+''')
+
+class Name:
+    '''Implements rendering a single domain name in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    attribute of the same name for its default value):
+    - name (string): A textual representation of the name, such as
+      'example.com'.
+    - pointer (int): If specified, a compression pointer will be
+      appended to the generated name data, with the offset being the
+      value of this parameter.
+    '''
+
+    name = 'example.com'
+    pointer = None                # no compression by default
+    def dump(self, f):
+        name = self.name
+        if self.pointer is not None:
+            if len(name) > 0 and name[-1] != '.':
+                name += '.'
+            name += 'ptr=%d' % self.pointer
+        name_wire = encode_name(name)
+        f.write('\n# DNS Name: %s' % self.name)
+        if self.pointer is not None:
+            f.write(' + compression pointer: %d' % self.pointer)
+        f.write('\n')
+        f.write('%s' % name_wire)
+        f.write('\n')
+
+class DNSHeader:
+    '''Implements rendering a DNS Header section in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    attribute of the same name for its default value):
+    - id (16-bit int):
+    - qr, aa, tc, rd, ra, ad, cd (0 or 1): Standard header bits as
+      defined in RFC1035 and RFC4035.  If set to 1, the corresponding
+      bit will be set; if set to 0, it will be cleared.
+    - mbz (0-3): The reserved field of the 3rd and 4th octets of the
+      header.
+    - rcode (4-bit int or string): The RCODE field.  If specified as a
+      string, it must be the commonly used textual mnemonic of the RCODEs
+      (NOERROR, FORMERR, etc, case insensitive).
+    - opcode (4-bit int or string): The OPCODE field.  If specified as
+      a string, it must be the commonly used textual mnemonic of the
+      OPCODEs (QUERY, NOTIFY, etc, case insensitive).
+    - qdcount, ancount, nscount, arcount (16-bit int): The QD/AN/NS/AR
+      COUNT fields, respectively.
+    '''
+
+    id = 0x1035
+    (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
+    mbz = 0
+    rcode = 0                   # noerror
+    opcode = 0                  # query
+    (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
+
+    def dump(self, f):
+        f.write('\n# Header Section\n')
+        f.write('# ID=' + str(self.id))
+        f.write(' QR=' + ('Response' if self.qr else 'Query'))
+        f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
+        f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
+        f.write('%s' % (' AA' if self.aa else ''))
+        f.write('%s' % (' TC' if self.tc else ''))
+        f.write('%s' % (' RD' if self.rd else ''))
+        f.write('%s' % (' AD' if self.ad else ''))
+        f.write('%s' % (' CD' if self.cd else ''))
+        f.write('\n')
+        f.write('%04x ' % self.id)
+        flag_and_code = 0
+        flag_and_code |= (self.qr << 15 | self.opcode << 11 | self.aa << 10 |
+                          self.tc << 9 | self.rd << 8 | self.ra << 7 |
+                          self.mbz << 6 | self.ad << 5 | self.cd << 4 |
+                          self.rcode)
+        f.write('%04x\n' % flag_and_code)
+        f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
+                (self.qdcount, self.ancount, self.nscount, self.arcount))
+        f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
+                                           self.nscount, self.arcount))
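+
+# A hypothetical [header] spec section (illustrative only; the mnemonics rely
+# on the rcode/opcode conversion tables defined earlier in this script):
+#   [header]
+#   qr: 1
+#   rcode: nxdomain
+#   ancount: 2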
+
+class DNSQuestion:
+    '''Implements rendering a DNS question in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - name (string): The QNAME.  The string must be interpreted as a
+      valid domain name.
+    - rrtype (int or string): The question type.  If specified
+      as an integer, it must be the 16-bit RR type value.  If
+      specified as a string, it must be the textual mnemonic of the
+      type.
+    - rrclass (int or string): The question class.  If specified as
+      an integer, it must be the 16-bit RR class value.  If specified
+      as a string, it must be the textual mnemonic of the class.
+    '''
+    name = 'example.com.'
+    rrtype = parse_value('A', dict_rrtype)
+    rrclass = parse_value('IN', dict_rrclass)
+
+    def dump(self, f):
+        f.write('\n# Question Section\n')
+        f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
+                (self.name,
+                 code_totext(self.rrtype, rdict_rrtype),
+                 code_totext(self.rrclass, rdict_rrclass)))
+        f.write(encode_name(self.name))
+        f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
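+
+# A hypothetical [question] spec section (illustrative only):
+#   [question]
+#   name: www.example.com.
+#   rrtype: AAAA
+#   rrclass: IN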
+
+class EDNS:
+    '''Implements rendering EDNS OPT RR in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - name (string): The owner name of the OPT RR.  The string must be
+      interpreted as a valid domain name.
+    - udpsize (16-bit int): The UDP payload size (set as the RR class)
+    - extrcode (8-bit int): The upper 8 bits of the extended RCODE.
+    - version (8-bit int): The EDNS version.
+    - do (int): The DNSSEC DO bit.  The bit will be set if this value
+      is 1; otherwise the bit will be unset.
+    - mbz (15-bit int): The rest of the flags field.
+    - rdlen (16-bit int): The RDLEN field.  Note: right now, specifying
+      a non-zero value (except for making bogus data) doesn't make sense
+      because there is no way to configure RDATA.
+    '''
+    name = '.'
+    udpsize = 4096
+    extrcode = 0
+    version = 0
+    do = 0
+    mbz = 0
+    rdlen = 0
+    def dump(self, f):
+        f.write('\n# EDNS OPT RR\n')
+        f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
+                (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
+                 self.udpsize, self.extrcode, self.version,
+                 1 if self.do else 0))
+
+        code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
+        extflags = (self.do << 15) | (self.mbz & ~0x8000)
+        f.write('%s %04x %04x %04x %04x\n' %
+                (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
+                 code_vers, extflags))
+        f.write('# RDLEN=%d\n' % self.rdlen)
+        f.write('%04x\n' % self.rdlen)
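+
+# A hypothetical [edns] spec section (illustrative only), requesting a
+# 1232-byte UDP payload size with the DO bit set:
+#   [edns]
+#   udpsize: 1232
+#   do: 1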
+
+class RR:
+    '''This is a base class for various types of RR test data.
+    For each RR type (A, AAAA, NS, etc), we define a derived class of RR
+    to dump type specific RDATA parameters.  This class defines parameters
+    common to all types of RDATA, namely the owner name, RR class and TTL.
+    The dump() methods of derived classes are expected to call dump_header(),
+    whose default implementation is provided in this class.  This method
+    decides whether to dump the test data as an RR (with name, type, class)
+    or only as RDATA (with its length), and dumps the corresponding data
+    via the specified file object.
+
+    By convention we assume derived classes are named after the common
+    standard mnemonic of the corresponding RR types.  For example, the
+    derived class for the RR type SOA should be named "SOA".
+
+    Configurable parameters are as follows:
+    - as_rr (bool): Whether or not the data is to be dumped as an RR.
+      False by default.
+    - rr_name (string): The owner name of the RR.  The string must be
+      interpreted as a valid domain name (compression pointer can be
+      contained).  Default is 'example.com.'
+    - rr_class (string): The RR class of the data.  Only meaningful
+      when the data is dumped as an RR.  Default is 'IN'.
+    - rr_ttl (int): The TTL value of the RR.  Only meaningful when
+      the data is dumped as an RR.  Default is 86400 (1 day).
+    - rdlen (int): 16-bit RDATA length.  It can be None (i.e. omitted
+      in the spec file), in which case the actual length of the
+      generated RDATA is automatically determined and used; if
+      negative, the RDLEN field will be omitted from the output data.
+      (Note that omitting RDLEN with as_rr being True is mostly
+      meaningless, although the script doesn't complain about it).
+      Default is None.
+    '''
+
+    def __init__(self):
+        self.as_rr = False
+        # only when as_rr is True, same for class/TTL:
+        self.rr_name = 'example.com'
+        self.rr_class = 'IN'
+        self.rr_ttl = 86400
+        self.rdlen = None
+
+    def dump_header(self, f, rdlen):
+        type_txt = self.__class__.__name__
+        type_code = parse_value(type_txt, dict_rrtype)
+        rdlen_spec = ''
+        rdlen_data = ''
+        if rdlen >= 0:
+            rdlen_spec = ', RDLEN=%d' % rdlen
+            rdlen_data = '%04x' % rdlen
+        if self.as_rr:
+            rrclass = parse_value(self.rr_class, dict_rrclass)
+            f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d%s)\n' %
+                    (type_txt, self.rr_name,
+                     code_totext(rrclass, rdict_rrclass), self.rr_ttl,
+                     rdlen_spec))
+            f.write('%s %04x %04x %08x %s\n' %
+                    (encode_name(self.rr_name), type_code, rrclass,
+                     self.rr_ttl, rdlen_data))
+        else:
+            f.write('\n# %s RDATA%s\n' % (type_txt, rdlen_spec))
+            f.write('%s\n' % rdlen_data)
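+
+# Illustrative sketch (not part of this commit): a new RR type could be
+# supported by adding a derived class that follows the same pattern as the
+# classes below, e.g. a hypothetical CNAME renderer:
+#
+#   class CNAME(RR):
+#       cname = 'cname.example.com'
+#       def dump(self, f):
+#           cname_wire = encode_name(self.cname)
+#           if self.rdlen is None:
+#               self.rdlen = int(len(cname_wire) / 2)
+#           self.dump_header(f, self.rdlen)
+#           f.write('# CNAME=%s\n' % self.cname)
+#           f.write('%s\n' % cname_wire)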
+
+class A(RR):
+    '''Implements rendering A RDATA (of class IN) in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - address (string): The address field.  This must be a valid textual
+      IPv4 address.
+    '''
+    RDLEN_DEFAULT = 4           # fixed by default
+    address = '192.0.2.1'
+
+    def dump(self, f):
+        if self.rdlen is None:
+            self.rdlen = self.RDLEN_DEFAULT
+        self.dump_header(f, self.rdlen)
+        f.write('# Address=%s\n' % (self.address))
+        bin_address = socket.inet_aton(self.address)
+        f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
+                                        bin_address[2], bin_address[3]))
+
+class AAAA(RR):
+    '''Implements rendering AAAA RDATA (of class IN) in the test data
+    format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - address (string): The address field.  This must be a valid textual
+      IPv6 address.
+    '''
+    RDLEN_DEFAULT = 16          # fixed by default
+    address = '2001:db8::1'
+
+    def dump(self, f):
+        if self.rdlen is None:
+            self.rdlen = self.RDLEN_DEFAULT
+        self.dump_header(f, self.rdlen)
+        f.write('# Address=%s\n' % (self.address))
+        bin_address = socket.inet_pton(socket.AF_INET6, self.address)
+        [f.write('%02x' % x) for x in bin_address]
+        f.write('\n')
+
+class NS(RR):
+    '''Implements rendering NS RDATA in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - nsname (string): The NSDNAME field.  The string must be
+      interpreted as a valid domain name.
+    '''
+
+    nsname = 'ns.example.com'
+
+    def dump(self, f):
+        nsname_wire = encode_name(self.nsname)
+        if self.rdlen is None:
+            self.rdlen = int(len(nsname_wire) / 2)
+        self.dump_header(f, self.rdlen)
+        f.write('# NS name=%s\n' % (self.nsname))
+        f.write('%s\n' % nsname_wire)
+
+class SOA(RR):
+    '''Implements rendering SOA RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - mname/rname (string): The MNAME/RNAME fields, respectively.  The
+      string must be interpreted as a valid domain name.
+    - serial (32-bit int): The SERIAL field
+    - refresh (32-bit int): The REFRESH field
+    - retry (32-bit int): The RETRY field
+    - expire (32-bit int): The EXPIRE field
+    - minimum (32-bit int): The MINIMUM field
+    '''
+
+    mname = 'ns.example.com'
+    rname = 'root.example.com'
+    serial = 2010012601
+    refresh = 3600
+    retry = 300
+    expire = 3600000
+    minimum = 1200
+    def dump(self, f):
+        mname_wire = encode_name(self.mname)
+        rname_wire = encode_name(self.rname)
+        if self.rdlen is None:
+            self.rdlen = int(20 + len(mname_wire) / 2 + len(rname_wire) / 2)
+        self.dump_header(f, self.rdlen)
+        f.write('# MNAME=%s RNAME=%s\n' % (self.mname, self.rname))
+        f.write('%s %s\n' % (mname_wire, rname_wire))
+        f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
+                (self.serial, self.refresh, self.retry, self.expire,
+                 self.minimum))
+        f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
+                                                self.retry, self.expire,
+                                                self.minimum))
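+
+# A hypothetical [soa] spec section overriding a few fields (illustrative
+# only); fields not listed keep the class defaults above:
+#   [soa]
+#   serial: 2011081201
+#   minimum: 3600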
+
+class TXT(RR):
+    '''Implements rendering TXT RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - nstring (int): number of character-strings
+    - stringlenN (int, N = 0, ..., nstring-1): the length of the
+      N-th character-string.
+    - stringN (string, N = 0, ..., nstring-1): the N-th
+      character-string.
+    - stringlen (int): the default string length.  If nstring >= 1 and
+      the corresponding stringlenN isn't specified in the spec file,
+      this value will be used.  If this parameter isn't specified
+      either, the length of the string will be used.  Note that this
+      means the parameter (or any stringlenN) doesn't have to be
+      specified unless you want to intentionally build a broken
+      character string.
+    - string (string): the default string.  If nstring >= 1 and the
+      corresponding stringN isn't specified in the spec file, this
+      string will be used.
+    '''
+
+    nstring = 1
+    stringlen = None
+    string = 'Test String'
+
+    def dump(self, f):
+        stringlen_list = []
+        string_list = []
+        wirestring_list = []
+        for i in range(0, self.nstring):
+            key_string = 'string' + str(i)
+            if key_string in self.__dict__:
+                string_list.append(self.__dict__[key_string])
+            else:
+                string_list.append(self.string)
+            wirestring_list.append(encode_string(string_list[-1]))
+            key_stringlen = 'stringlen' + str(i)
+            if key_stringlen in self.__dict__:
+                stringlen_list.append(self.__dict__[key_stringlen])
+            else:
+                stringlen_list.append(self.stringlen)
+            if stringlen_list[-1] is None:
+                stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
+        if self.rdlen is None:
+            self.rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
+        self.dump_header(f, self.rdlen)
+        for i in range(0, self.nstring):
+            f.write('# String Len=%d, String=\"%s\"\n' %
+                    (stringlen_list[i], string_list[i]))
+            f.write('%02x%s%s\n' % (stringlen_list[i],
+                                    ' ' if len(wirestring_list[i]) > 0 else '',
+                                    wirestring_list[i]))
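+
+# A hypothetical [txt] spec section with two character-strings (illustrative
+# only); string1 is not given, so it falls back to the default "string":
+#   [txt]
+#   nstring: 2
+#   string0: foo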
+
+class RP(RR):
+    '''Implements rendering RP RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - mailbox (string): The mailbox field.
+    - text (string): The text field.
+    These strings must be interpreted as valid domain names.
+    '''
+    mailbox = 'root.example.com'
+    text = 'rp-text.example.com'
+    def dump(self, f):
+        mailbox_wire = encode_name(self.mailbox)
+        text_wire = encode_name(self.text)
+        if self.rdlen is None:
+            self.rdlen = int((len(mailbox_wire) + len(text_wire)) / 2)
+        else:
+            self.rdlen = int(self.rdlen)
+        self.dump_header(f, self.rdlen)
+        f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
+        f.write('%s %s\n' % (mailbox_wire, text_wire))
+
+class NSECBASE(RR):
+    '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
+    these RRs.  The NSEC and NSEC3 classes will be inherited from this
+    class.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - nbitmap (int): The number of type bitmaps.
+    The following three define the bitmaps.  If suffixed with "N"
+    (0 <= N < nbitmap), it means the definition for the N-th bitmap.
+    If there is no suffix (e.g., just "block"), it means the default
+    for any unspecified values.
+    - block[N] (8-bit int): The Window Block.
+    - maplen[N] (8-bit int): The Bitmap Length.  The default "maplen"
+      can also be left unspecified (set to None), in which case
+      the corresponding length will be calculated from the bitmap.
+    - bitmap[N] (string): The Bitmap.  This must be the hexadecimal
+      representation of the bitmap field.  For example, for a bitmap
+      where the 7th and 15th bits (and only these bits) are set, it
+      must be '0101'.  Note also that the value must be quoted with
+      single quotation marks because it could otherwise be interpreted
+      as an integer.
+    '''
+    nbitmap = 1                 # number of bitmaps
+    block = 0
+    maplen = None              # default bitmap length, auto-calculate
+    bitmap = '040000000003'     # an arbitrarily chosen bitmap sample
+    def dump(self, f):
+        # first, construct the bitmap data
+        block_list = []
+        maplen_list = []
+        bitmap_list = []
+        for i in range(0, self.nbitmap):
+            key_bitmap = 'bitmap' + str(i)
+            if key_bitmap in self.__dict__:
+                bitmap_list.append(self.__dict__[key_bitmap])
+            else:
+                bitmap_list.append(self.bitmap)
+            key_maplen = 'maplen' + str(i)
+            if key_maplen in self.__dict__:
+                maplen_list.append(self.__dict__[key_maplen])
+            else:
+                maplen_list.append(self.maplen)
+            if maplen_list[-1] is None: # calculate it if not specified
+                maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
+            key_block = 'block' + str(i)
+            if key_block in self.__dict__:
+                block_list.append(self.__dict__[key_block])
+            else:
+                block_list.append(self.block)
+
+        # dump RR-type specific part (NSEC or NSEC3)
+        self.dump_fixedpart(f, 2 * self.nbitmap + \
+                                int(len(''.join(bitmap_list)) / 2))
+
+        # dump the bitmap
+        for i in range(0, self.nbitmap):
+            f.write('# Bitmap: Block=%d, Length=%d\n' %
+                    (block_list[i], maplen_list[i]))
+            f.write('%02x %02x %s\n' %
+                    (block_list[i], maplen_list[i], bitmap_list[i]))
+
+class NSEC(NSECBASE):
+    '''Implements rendering NSEC RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - Type bitmap related parameters: see class NSECBASE
+    - nextname (string): The Next Domain Name field.  The string must be
+      interpreted as a valid domain name.
+    '''
+
+    nextname = 'next.example.com'
+    def dump_fixedpart(self, f, bitmap_totallen):
+        name_wire = encode_name(self.nextname)
+        if self.rdlen is None:
+            # if rdlen needs to be calculated, it must be based on the bitmap
+            # length, because the configured maplen can be fake.
+            self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
+        self.dump_header(f, self.rdlen)
+        f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
+                                                 int(len(name_wire) / 2)))
+        f.write('%s\n' % name_wire)
+
+class NSEC3(NSECBASE):
+    '''Implements rendering NSEC3 RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - Type bitmap related parameters: see class NSECBASE
+    - hashalg (8-bit int): The Hash Algorithm field.  Note that
+      currently the only defined algorithm is SHA-1, for which a value
+      of 1 will be used, and it's the default.  So this implementation
+      does not support any string representation right now.
+    - optout (bool): The Opt-Out flag of the Flags field.
+    - mbz (7-bit int): The rest of the Flags field.  This value will
+      be left-shifted by 1 bit and then OR-ed with optout to
+      construct the complete Flags field.
+    - iterations (16-bit int): The Iterations field.
+    - saltlen (int): The Salt Length field.
+    - salt (string): The Salt field.  It is converted to a sequence of
+      ascii codes and its hexadecimal representation will be used.
+    - hashlen (int): The Hash Length field.
+    - hash (string): The Next Hashed Owner Name field.  This parameter
+      is interpreted in the same way as "salt".
+    '''
+
+    hashalg = 1                 # SHA-1
+    optout = False              # opt-out flag
+    mbz = 0                     # other flag fields (none defined yet)
+    iterations = 1
+    saltlen = 5
+    salt = 's' * saltlen
+    hashlen = 20
+    hash = 'h' * hashlen
+    def dump_fixedpart(self, f, bitmap_totallen):
+        if self.rdlen is None:
+            # if rdlen needs to be calculated, it must be based on the bitmap
+            # length, because the configured maplen can be fake.
+            self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
+                + bitmap_totallen
+        self.dump_header(f, self.rdlen)
+        optout_val = 1 if self.optout else 0
+        f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
+                (code_totext(self.hashalg, rdict_nsec3_algorithm),
+                 optout_val, self.mbz, self.iterations))
+        f.write('%02x %02x %04x\n' %
+                (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
+        f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
+        f.write('%02x%s%s\n' % (self.saltlen,
+                                ' ' if len(self.salt) > 0 else '',
+                                encode_string(self.salt)))
+        f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
+        f.write('%02x%s%s\n' % (self.hashlen,
+                                ' ' if len(self.hash) > 0 else '',
+                                encode_string(self.hash)))
+
+class RRSIG(RR):
+    '''Implements rendering RRSIG RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - covered (int or string): The Type Covered field.  If specified
+      as an integer, it must be the 16-bit RR type value of the
+      covered type.  If specified as a string, it must be the textual
+      mnemonic of the type.
+    - algorithm (int or string): The Algorithm field.  If specified
+      as an integer, it must be the 8-bit algorithm number as defined
+      in RFC4034.  If specified as a string, it must be one of the keys
+      of dict_algorithm (case insensitive).
+    - labels (int): The Labels field.  If omitted (the corresponding
+      variable being set to None), the number of labels of "signer"
+      (excluding the trailing null label as specified in RFC4034) will
+      be used.
+    - originalttl (32-bit int): The Original TTL field.
+    - expiration (32-bit int): The Signature Expiration field.
+    - inception (32-bit int): The Signature Inception field.
+    - tag (16-bit int): The Key Tag field.
+    - signer (string): The Signer's Name field.  The string must be
+      interpreted as a valid domain name.
+    - signature (int): The Signature field.  Right now only a simple
+      integer form is supported.  A prefix of "0" will be prepended if
+      the resulting hexadecimal representation consists of an odd
+      number of characters.
+    '''
+
+    covered = 'A'
+    algorithm = 'RSASHA1'
+    labels = None                 # auto-calculate (#labels of signer)
+    originalttl = 3600
+    expiration = int(time.mktime(datetime.strptime('20100131120000',
+                                                   dnssec_timefmt).timetuple()))
+    inception = int(time.mktime(datetime.strptime('20100101120000',
+                                                  dnssec_timefmt).timetuple()))
+    tag = 0x1035
+    signer = 'example.com'
+    signature = 0x123456789abcdef123456789abcdef
+
+    def dump(self, f):
+        name_wire = encode_name(self.signer)
+        sig_wire = '%x' % self.signature
+        if len(sig_wire) % 2 != 0:
+            sig_wire = '0' + sig_wire
+        if self.rdlen is None:
+            self.rdlen = int(18 + len(name_wire) / 2 + len(sig_wire) / 2)
+        self.dump_header(f, self.rdlen)
+
+        if type(self.covered) is str:
+            self.covered = dict_rrtype[self.covered.lower()]
+        if type(self.algorithm) is str:
+            self.algorithm = dict_algorithm[self.algorithm.lower()]
+        if self.labels is None:
+            self.labels = count_namelabels(self.signer)
+        f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
+                (code_totext(self.covered, rdict_rrtype),
+                 code_totext(self.algorithm, rdict_algorithm), self.labels,
+                 self.originalttl))
+        f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
+                                           self.labels, self.originalttl))
+        f.write('# Expiration=%s, Inception=%s\n' %
+                (str(self.expiration), str(self.inception)))
+        f.write('%08x %08x\n' % (self.expiration, self.inception))
+        f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
+        f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
+
+class TSIG(RR):
+    '''Implements rendering TSIG RDATA in the test data format.
+
+    As a meta RR type TSIG uses some non common parameters.  This
+    class overrides some of the default attributes of the RR class
+    accordingly:
+    - rr_class is set to 'ANY'
+    - rr_ttl is set to 0
+    Like other derived classes these can be overridden via the spec
+    file.
+
+    Other configurable parameters are as follows (see the description
+    of the same name of attribute for the default value):
+    - algorithm (string): The Algorithm Name field.  The value is
+      generally interpreted as a domain name string, and will
+      typically be one of the standard algorithm names defined in
+      RFC4635.  For convenience, however, a shortcut value "hmac-md5"
+      is allowed instead of the standard "hmac-md5.sig-alg.reg.int".
+    - time_signed (48-bit int): The Time Signed field.
+    - fudge (16-bit int): The Fudge field.
+    - mac_size (int): The MAC Size field.  If omitted, the common value
+      determined by the algorithm will be used.
+    - mac (int or string): The MAC field.  If specified as an integer,
+      the integer value is used as the MAC, possibly with prepended
+      0's so that the total length will be mac_size.  If specified as a
+      string, it is converted to a sequence of ascii codes and its
+      hexadecimal representation will be used.  So, for example, if
+      "mac" is set to 'abc', it will be converted to '616263'.  Note
+      that in this case the length of "mac" may not be equal to
+      mac_size.  If unspecified, a sequence of mac_size bytes of value
+      0x78 (the ASCII code of 'x') will be used.
+    - original_id (16-bit int): The Original ID field.
+    - error (16-bit int): The Error field.
+    - other_len (int): The Other Len field.
+    - other_data (int or string): The Other Data field.  This is
+      interpreted just like "mac" except that other_len is used
+      instead of mac_size.  If unspecified this will be empty unless
+      the "error" is set to 18 (which means the "BADTIME" error), in
+      which case a hexadecimal representation of "time_signed + fudge
+      + 1" will be used.
+    '''
+
+    algorithm = 'hmac-sha256'
+    time_signed = 1286978795    # arbitrarily chosen default
+    fudge = 300
+    mac_size = None             # use a common value for the algorithm
+    mac = None                  # use 'x' * mac_size
+    original_id = 2845          # arbitrarily chosen default
+    error = 0
+    other_len = None         # 6 if error is BADTIME; otherwise 0
+    other_data = None        # use time_signed + fudge + 1 for BADTIME
+    dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
+
+    # TSIG has some special defaults
+    def __init__(self):
+        super().__init__()
+        self.rr_class = 'ANY'
+        self.rr_ttl = 0
+
+    def dump(self, f):
+        if str(self.algorithm) == 'hmac-md5':
+            name_wire = encode_name('hmac-md5.sig-alg.reg.int')
+        else:
+            name_wire = encode_name(self.algorithm)
+        mac_size = self.mac_size
+        if mac_size is None:
+            if self.algorithm in self.dict_macsize.keys():
+                mac_size = self.dict_macsize[self.algorithm]
+            else:
+                raise RuntimeError('TSIG Mac Size cannot be determined')
+        mac = encode_string('x' * mac_size) if self.mac is None else \
+            encode_string(self.mac, mac_size)
+        other_len = self.other_len
+        if other_len is None:
+            # 18 = BADTIME
+            other_len = 6 if self.error == 18 else 0
+        other_data = self.other_data
+        if other_data is None:
+            other_data = '%012x' % (self.time_signed + self.fudge + 1) \
+                if self.error == 18 else ''
+        else:
+            other_data = encode_string(self.other_data, other_len)
+        if self.rdlen is None:
+            self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
+                                 len(other_data) / 2)
+        self.dump_header(f, self.rdlen)
+        f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
+                (self.algorithm, self.time_signed, self.fudge))
+        f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
+        f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
+        f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
+        f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
+        f.write('%04x %04x\n' %  (self.original_id, self.error))
+        f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
+        f.write('%04x%s\n' % (other_len,
+                              ' ' + other_data if len(other_data) > 0 else ''))
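+
+# A hypothetical [tsig] spec section (illustrative only); the MAC size is
+# taken from dict_macsize for the chosen algorithm, and error 18 (BADTIME)
+# triggers the automatic Other Data described above:
+#   [tsig]
+#   algorithm: hmac-md5
+#   error: 18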
+
+# Build section-class mapping
+config_param = { 'name' : (Name, {}),
+                 'header' : (DNSHeader, header_xtables),
+                 'question' : (DNSQuestion, question_xtables),
+                 'edns' : (EDNS, {}) }
+for rrtype in dict_rrtype.keys():
+    # For any supported RR types add the tuple of (RR_CLASS, {}).
+    # We expect KeyError as not all the types are supported, and simply
+    # ignore them.
+    try:
+        cur_mod = sys.modules[__name__]
+        config_param[rrtype] = (cur_mod.__dict__[rrtype.upper()], {})
+    except KeyError:
+        pass
+
+def get_config_param(section):
+    s = section
+    m = re.match('^([^:]+)/\d+$', section)
+    if m:
+        s = m.group(1)
+    return config_param[s]
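+
+# In the 'custom' mode (used in the main routine below), the spec file lists
+# the sections to dump in order; a numeric suffix lets the same section type
+# appear more than once.  A hypothetical example (illustrative only):
+#   [custom]
+#   sections: header:question:a/1:a/2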
+
+usage = '''usage: %prog [options] input_file'''
+
+if __name__ == "__main__":
+    parser = OptionParser(usage=usage)
+    parser.add_option('-o', '--output', action='store', dest='output',
+                      default=None, metavar='FILE',
+                      help='output file name [default: prefix of input_file]')
+    (options, args) = parser.parse_args()
+
+    if len(args) == 0:
+        parser.error('input file is missing')
+    configfile = args[0]
+
+    outputfile = options.output
+    if not outputfile:
+        m = re.match('(.*)\.[^.]+$', configfile)
+        if m:
+            outputfile = m.group(1)
+        else:
+            raise ValueError('output file is not specified and input file is '
+                             'not in the form of "output_file.suffix"')
+
+    config = configparser.SafeConfigParser()
+    config.read(configfile)
+
+    output = open(outputfile, 'w')
+
+    print_header(output, configfile)
+
+    # First try the 'custom' mode; if it fails assume the query mode.
+    try:
+        sections = config.get('custom', 'sections').split(':')
+    except configparser.NoSectionError:
+        sections = ['header', 'question', 'edns']
+
+    for s in sections:
+        section_param = get_config_param(s)
+        (obj, xtables) = (section_param[0](), section_param[1])
+        if get_config(config, s, obj, xtables):
+            obj.dump(output)
+
+    output.close()
diff --git a/src/lib/util/strutil.cc b/src/lib/util/strutil.cc
index 161f9ac..ed7fc9b 100644
--- a/src/lib/util/strutil.cc
+++ b/src/lib/util/strutil.cc
@@ -132,6 +132,17 @@ format(const std::string& format, const std::vector<std::string>& args) {
     return (result);
 }
 
+std::string
+getToken(std::istringstream& iss) {
+    string token;
+    iss >> token;
+    if (iss.bad() || iss.fail()) {
+        isc_throw(StringTokenError, "could not read token from string");
+    }
+    return (token);
+}
+
+
 } // namespace str
 } // namespace util
 } // namespace isc
diff --git a/src/lib/util/strutil.h b/src/lib/util/strutil.h
index e044c15..021c236 100644
--- a/src/lib/util/strutil.h
+++ b/src/lib/util/strutil.h
@@ -18,7 +18,10 @@
 #include <algorithm>
 #include <cctype>
 #include <string>
+#include <sstream>
 #include <vector>
+#include <exceptions/exceptions.h>
+#include <boost/lexical_cast.hpp>
 
 namespace isc {
 namespace util {
@@ -26,6 +29,16 @@ namespace str {
 
 /// \brief A Set of C++ Utilities for Manipulating Strings
 
+///
+/// \brief A standard string util exception that is thrown if getToken or
+/// tokenToNum are called with bad input data
+///
+class StringTokenError : public Exception {
+public:
+    StringTokenError(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
 /// \brief Normalize Backslash
 ///
 /// Only relevant to Windows, this replaces all "\" in a string with "/" and
@@ -140,6 +153,55 @@ std::string format(const std::string& format,
     const std::vector<std::string>& args);
 
 
+/// \brief Returns one token from the given stringstream
+///
+/// Using the >> operator, with basic error checking
+///
+/// \exception StringTokenError if the token cannot be read from the stream
+///
+/// \param iss stringstream to read one token from
+///
+/// \return the first token read from the stringstream
+std::string getToken(std::istringstream& iss);
+
+/// \brief Converts a string token to an *unsigned* integer.
+///
+/// The value is converted using a lexical cast, with error and bounds
+/// checking.
+///
+/// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
+/// wide to store resulting integers.
+///
+/// BitSize is the maximum number of bits that the resulting integer can take.
+/// This function first checks whether the given token can be converted to
+/// an integer of NumType type.  It then confirms the conversion result is
+/// within the valid range, i.e., [0, 2^BitSize - 1].  The second check is
+/// necessary because lexical_cast<T> where T is an unsigned integer type
+/// doesn't correctly reject negative numbers when compiled with SunStudio.
+///
+/// \exception StringTokenError if the value is out of range, or if it
+///            could not be converted
+///
+/// \param num_token the string token to convert
+///
+/// \return the converted value, of type NumType
+template <typename NumType, int BitSize>
+NumType
+tokenToNum(const std::string& num_token) {
+    NumType num;
+    try {
+        num = boost::lexical_cast<NumType>(num_token);
+    } catch (const boost::bad_lexical_cast& ex) {
+        isc_throw(StringTokenError, "Invalid SRV numeric parameter: " <<
+                  num_token);
+    }
+    if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
+        isc_throw(StringTokenError, "Numeric SRV parameter out of range: " <<
+                  num);
+    }
+    return (num);
+}
+
 } // namespace str
 } // namespace util
 } // namespace isc
diff --git a/src/lib/util/tests/strutil_unittest.cc b/src/lib/util/tests/strutil_unittest.cc
index cd3a9ca..74bc17d 100644
--- a/src/lib/util/tests/strutil_unittest.cc
+++ b/src/lib/util/tests/strutil_unittest.cc
@@ -12,6 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <stdint.h>
+
 #include <string>
 
 #include <gtest/gtest.h>
@@ -22,17 +24,9 @@ using namespace isc;
 using namespace isc::util;
 using namespace std;
 
-class StringUtilTest : public ::testing::Test {
-protected:
-    StringUtilTest()
-    {
-    }
-};
-
-
 // Check for slash replacement
 
-TEST_F(StringUtilTest, Slash) {
+TEST(StringUtilTest, Slash) {
 
     string instring = "";
     isc::util::str::normalizeSlash(instring);
@@ -49,7 +43,7 @@ TEST_F(StringUtilTest, Slash) {
 
 // Check that leading and trailing space trimming works
 
-TEST_F(StringUtilTest, Trim) {
+TEST(StringUtilTest, Trim) {
 
     // Empty and full string.
     EXPECT_EQ("", isc::util::str::trim(""));
@@ -71,7 +65,7 @@ TEST_F(StringUtilTest, Trim) {
 // returned vector; if not as expected, the following references may be invalid
 // so should not be used.
 
-TEST_F(StringUtilTest, Tokens) {
+TEST(StringUtilTest, Tokens) {
     vector<string>  result;
 
     // Default delimiters
@@ -157,7 +151,7 @@ TEST_F(StringUtilTest, Tokens) {
 
 // Changing case
 
-TEST_F(StringUtilTest, ChangeCase) {
+TEST(StringUtilTest, ChangeCase) {
     string mixed("abcDEFghiJKLmno123[]{=+--+]}");
     string upper("ABCDEFGHIJKLMNO123[]{=+--+]}");
     string lower("abcdefghijklmno123[]{=+--+]}");
@@ -173,7 +167,7 @@ TEST_F(StringUtilTest, ChangeCase) {
 
 // Formatting
 
-TEST_F(StringUtilTest, Formatting) {
+TEST(StringUtilTest, Formatting) {
 
     vector<string> args;
     args.push_back("arg1");
@@ -213,3 +207,63 @@ TEST_F(StringUtilTest, Formatting) {
     string format9 = "%s %s";
     EXPECT_EQ(format9, isc::util::str::format(format9, args));
 }
+
+TEST(StringUtilTest, getToken) {
+    string s("a b c");
+    istringstream ss(s);
+    EXPECT_EQ("a", isc::util::str::getToken(ss));
+    EXPECT_EQ("b", isc::util::str::getToken(ss));
+    EXPECT_EQ("c", isc::util::str::getToken(ss));
+    EXPECT_THROW(isc::util::str::getToken(ss), isc::util::str::StringTokenError);
+}
+
+int32_t tokenToNumCall_32_16(const string& token) {
+    return isc::util::str::tokenToNum<int32_t, 16>(token);
+}
+
+int16_t tokenToNumCall_16_8(const string& token) {
+    return isc::util::str::tokenToNum<int16_t, 8>(token);
+}
+
+TEST(StringUtilTest, tokenToNum) {
+    uint32_t num32 = tokenToNumCall_32_16("0");
+    EXPECT_EQ(0, num32);
+    num32 = tokenToNumCall_32_16("123");
+    EXPECT_EQ(123, num32);
+    num32 = tokenToNumCall_32_16("65535");
+    EXPECT_EQ(65535, num32);
+
+    EXPECT_THROW(tokenToNumCall_32_16(""),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("a"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("-1"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("65536"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("1234567890"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("-1234567890"),
+                 isc::util::str::StringTokenError);
+
+    uint16_t num16 = tokenToNumCall_16_8("123");
+    EXPECT_EQ(123, num16);
+    num16 = tokenToNumCall_16_8("0");
+    EXPECT_EQ(0, num16);
+    num16 = tokenToNumCall_16_8("255");
+    EXPECT_EQ(255, num16);
+
+    EXPECT_THROW(tokenToNumCall_16_8(""),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("a"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("-1"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("256"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("1234567890"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("-1234567890"),
+                 isc::util::str::StringTokenError);
+
+}




More information about the bind10-changes mailing list